FastLED 3.9.15
hash_map.h
#pragma once

/*
HashMap that is optimized for embedded devices. The hashmap
will store up to N elements inline and will spill over to the heap
beyond that. During rehash it checks whether the number of tombstones
exceeds the number of live elements and, if so, compacts in place
instead of growing. This keeps memory from growing across repeated
inserts and removals.
*/

// #include <cstddef>
// #include <iterator>

#include "fl/assert.h"
#include "fl/bitset.h"
#include "fl/clamp.h"
#include "fl/hash.h"
#include "fl/map_range.h"
#include "fl/optional.h"
#include "fl/pair.h"
#include "fl/type_traits.h"
#include "fl/vector.h"
#include "fl/warn.h"
#include "fl/align.h"
#include "fl/compiler_control.h"
#include "fl/math_macros.h"

#include "fl/compiler_control.h"

FL_DISABLE_WARNING_PUSH
FL_DISABLE_WARNING_SHORTEN_64_TO_32

namespace fl {

// // begin using declarations for stl compatibility
// use fl::equal_to;
// use fl::hash_map;
// use fl::unordered_map;
// // end using declarations for stl compatibility

template <typename T> struct EqualTo {
    bool operator()(const T &a, const T &b) const { return a == b; }
};

// -- HashMap class --------------------------------------- Begin HashMap class

#ifndef FASTLED_HASHMAP_INLINED_COUNT
#define FASTLED_HASHMAP_INLINED_COUNT 8
#endif

template <typename Key, typename T, typename Hash = Hash<Key>,
          typename KeyEqual = EqualTo<Key>,
          int INLINED_COUNT = FASTLED_HASHMAP_INLINED_COUNT>
class HashMap {
  public:
    HashMap(fl::size initial_capacity) : HashMap(initial_capacity, 0.7f) {}

    HashMap(fl::size initial_capacity, float max_load)
        : _buckets(next_power_of_two(initial_capacity)), _size(0),
          _tombstones(0), _occupied(next_power_of_two(initial_capacity)),
          _deleted(next_power_of_two(initial_capacity)) {
        setLoadFactor(max_load);
    }

    void setLoadFactor(float f) {
        f = fl::clamp(f, 0.f, 1.f);
        mLoadFactor = fl::map_range<float, u8>(f, 0.f, 1.f, 0, 255);
    }

    // Iterator support.
    struct iterator {
        // Standard iterator typedefs
        // using difference_type = std::ptrdiff_t;
        using value_type = pair<const Key, T>; // Keep const Key as per standard
        using pointer = value_type *;
        using reference = value_type &;
        // using iterator_category = std::forward_iterator_tag;

        iterator() : _map(nullptr), _idx(0) {}
        iterator(HashMap *m, fl::size idx) : _map(m), _idx(idx) {
            advance_to_occupied();
        }

        value_type operator*() const {
            auto &e = _map->_buckets[_idx];
            return {e.key, e.value};
        }

        pointer operator->() {
            // pair<const Key, T> and pair<Key, T> are different types with the
            // same memory layout, so destroy and reconstruct the cached value
            // through a reinterpret_cast view to avoid assignment issues.
            using mutable_value_type = pair<Key, T>;
            auto &mutable_cached = *reinterpret_cast<mutable_value_type *>(&_cached_value);
            mutable_cached.~mutable_value_type();
            new (&mutable_cached) mutable_value_type(operator*());
            return &_cached_value;
        }

        pointer operator->() const {
            // Same as above, but using fl::bit_cast for the pointer conversion.
            using mutable_value_type = pair<Key, T>;
            auto &mutable_cached = *fl::bit_cast<mutable_value_type *>(&_cached_value);
            mutable_cached.~mutable_value_type();
            new (&mutable_cached) mutable_value_type(operator*());
            return &_cached_value;
        }

        iterator &operator++() {
            ++_idx;
            advance_to_occupied();
            return *this;
        }

        iterator operator++(int) {
            iterator tmp = *this;
            ++(*this);
            return tmp;
        }

        bool operator==(const iterator &o) const {
            return _map == o._map && _idx == o._idx;
        }
        bool operator!=(const iterator &o) const { return !(*this == o); }

        void advance_to_occupied() {
            if (!_map)
                return;
            fl::size cap = _map->_buckets.size();
            while (_idx < cap && !_map->is_occupied(_idx))
                ++_idx;
        }

      private:
        HashMap *_map;
        fl::size _idx;
        mutable value_type _cached_value;
        friend class HashMap;
    };

    struct const_iterator {
        // Standard iterator typedefs
        // using difference_type = std::ptrdiff_t;
        using value_type = pair<const Key, T>;
        using pointer = const value_type *;
        using reference = const value_type &;
        // using iterator_category = std::forward_iterator_tag;

        const_iterator() : _map(nullptr), _idx(0) {}
        const_iterator(const HashMap *m, fl::size idx) : _map(m), _idx(idx) {
            advance_to_occupied();
        }
        const_iterator(const iterator &it) : _map(it._map), _idx(it._idx) {}

        value_type operator*() const {
            auto &e = _map->_buckets[_idx];
            return {e.key, e.value};
        }

        pointer operator->() const {
            // Same layout-compatible rebuild of the cached pair as in iterator.
            using mutable_value_type = pair<Key, T>;
            auto &mutable_cached = *reinterpret_cast<mutable_value_type *>(&_cached_value);
            mutable_cached.~mutable_value_type();
            new (&mutable_cached) mutable_value_type(operator*());
            return &_cached_value;
        }

        const_iterator &operator++() {
            ++_idx;
            advance_to_occupied();
            return *this;
        }

        const_iterator operator++(int) {
            const_iterator tmp = *this;
            ++(*this);
            return tmp;
        }

        bool operator==(const const_iterator &o) const {
            return _map == o._map && _idx == o._idx;
        }
        bool operator!=(const const_iterator &o) const { return !(*this == o); }

        void advance_to_occupied() {
            if (!_map)
                return;
            fl::size cap = _map->_buckets.size();
            while (_idx < cap && !_map->is_occupied(_idx))
                ++_idx;
        }

        friend class HashMap;

      private:
        const HashMap *_map;
        fl::size _idx;
        mutable value_type _cached_value;
    };

    iterator begin() { return iterator(this, 0); }
    iterator end() { return iterator(this, _buckets.size()); }
    const_iterator begin() const { return const_iterator(this, 0); }
    const_iterator end() const { return const_iterator(this, _buckets.size()); }
    const_iterator cbegin() const { return const_iterator(this, 0); }
    const_iterator cend() const {
        return const_iterator(this, _buckets.size());
    }

    static bool NeedsRehash(fl::size size, fl::size bucket_size, fl::size tombstones,
                            u8 load_factor) {
        // (size + tombstones) << 8 : multiply the numerator by 256
        // bucket_size * load_factor : denominator times the threshold
        u32 lhs = (size + tombstones) << 8;
        u32 rhs = (bucket_size * load_factor);
        return lhs > rhs;
    }
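    // Worked example (illustrative numbers): with a load factor of 0.7 stored
    // as load_factor ≈ 178 (0.7 * 255), size = 6, tombstones = 1 and
    // bucket_size = 8, lhs = (6 + 1) << 8 = 1792 and rhs = 8 * 178 = 1424,
    // so lhs > rhs and a rehash is triggered.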

    // returns true if (size + tombstones) / capacity > mLoadFactor / 256
    bool needs_rehash() const {
        return NeedsRehash(_size, _buckets.size(), _tombstones, mLoadFactor);
    }

    // insert or overwrite
    void insert(const Key &key, const T &value) {
        const bool will_rehash = needs_rehash();
        if (will_rehash) {
            // If tombstones outnumber live elements, rehash in place to avoid
            // spilling the inline storage over to the heap.
            if (_tombstones > _size) {
                rehash_inline_no_resize();
            } else {
                rehash(_buckets.size() * 2);
            }
        }
        fl::size idx;
        bool is_new;
        auto p = find_slot(key);
        idx = p.first;
        is_new = p.second;
        if (is_new) {
            _buckets[idx].key = key;
            _buckets[idx].value = value;
            mark_occupied(idx);
            ++_size;
        } else {
            FASTLED_ASSERT(idx != npos(), "HashMap::insert: invalid index at "
                                              << idx << " which is " << npos());
            _buckets[idx].value = value;
        }
    }

    // Move version of insert
    void insert(Key &&key, T &&value) {
        const bool will_rehash = needs_rehash();
        if (will_rehash) {
            // If tombstones outnumber live elements, rehash in place to avoid
            // spilling the inline storage over to the heap.
            if (_tombstones > _size) {
                rehash_inline_no_resize();
            } else {
                rehash(_buckets.size() * 2);
            }
        }
        fl::size idx;
        bool is_new;
        auto p = find_slot(key);
        idx = p.first;
        is_new = p.second;
        if (is_new) {
            _buckets[idx].key = fl::move(key);
            _buckets[idx].value = fl::move(value);
            mark_occupied(idx);
            ++_size;
        } else {
            FASTLED_ASSERT(idx != npos(), "HashMap::insert: invalid index at "
                                              << idx << " which is " << npos());
            _buckets[idx].value = fl::move(value);
        }
    }

    // remove key; returns true if removed
    bool remove(const Key &key) {
        auto idx = find_index(key);
        if (idx == npos())
            return false;
        mark_deleted(idx);
        --_size;
        ++_tombstones;
        return true;
    }

    bool erase(const Key &key) { return remove(key); }

    // Iterator-based erase - more efficient when you already have the iterator position
    iterator erase(iterator it) {
        if (it == end() || it._map != this) {
            return end(); // Invalid iterator
        }

        // Mark the current position as deleted
        mark_deleted(it._idx);
        --_size;
        ++_tombstones;

        // Advance to next valid element and return iterator to it
        ++it._idx;
        it.advance_to_occupied();
        return it;
    }

    void clear() {
        _buckets.assign(_buckets.size(), Entry{});
        _occupied.reset();
        _deleted.reset();
        _size = _tombstones = 0;
    }

    // find pointer to value or nullptr
    T *find_value(const Key &key) {
        auto idx = find_index(key);
        return idx == npos() ? nullptr : &_buckets[idx].value;
    }

    const T *find_value(const Key &key) const {
        auto idx = find_index(key);
        return idx == npos() ? nullptr : &_buckets[idx].value;
    }

    iterator find(const Key &key) {
        auto idx = find_index(key);
        return idx == npos() ? end() : iterator(this, idx);
    }

    const_iterator find(const Key &key) const {
        auto idx = find_index(key);
        return idx == npos() ? end() : const_iterator(this, idx);
    }

    bool contains(const Key &key) const {
        auto idx = find_index(key);
        return idx != npos();
    }

    // access or default-construct
    T &operator[](const Key &key) {
        fl::size idx;
        bool is_new;

        auto p = find_slot(key);
        idx = p.first;
        is_new = p.second;

        // Check if find_slot failed to find a valid slot (HashMap is full)
        if (idx == npos()) {
            // Need to resize to make room
            if (needs_rehash()) {
                // If at least half the buckets are tombstones, rehash in place
                // to prevent memory growth. Otherwise, double the size.
                if (_tombstones >= _buckets.size() / 2) {
                    rehash_inline_no_resize();
                } else {
                    rehash(_buckets.size() * 2);
                }
            } else {
                // Force a rehash with double size if needs_rehash() doesn't detect the issue
                rehash(_buckets.size() * 2);
            }

            // Try find_slot again after resize
            p = find_slot(key);
            idx = p.first;
            is_new = p.second;

            // If still npos() after resize, something is seriously wrong
            if (idx == npos()) {
                // This should never happen after a successful resize
                static T default_value{};
                return default_value;
            }
        }

        if (is_new) {
            _buckets[idx].key = key;
            _buckets[idx].value = T{};
            mark_occupied(idx);
            ++_size;
        }
        return _buckets[idx].value;
    }

    fl::size size() const { return _size; }
    bool empty() const { return _size == 0; }
    fl::size capacity() const { return _buckets.size(); }

  private:
    static fl::size npos() {
        return static_cast<fl::size>(-1);
    }

    // Helper methods to check entry state
    bool is_occupied(fl::size idx) const { return _occupied.test(idx); }

    bool is_deleted(fl::size idx) const { return _deleted.test(idx); }

    bool is_empty(fl::size idx) const {
        return !is_occupied(idx) && !is_deleted(idx);
    }

    void mark_occupied(fl::size idx) {
        _occupied.set(idx);
        _deleted.reset(idx);
    }

    void mark_deleted(fl::size idx) {
        _occupied.reset(idx);
        _deleted.set(idx);
    }

    void mark_empty(fl::size idx) {
        _occupied.reset(idx);
        _deleted.reset(idx);
    }

    struct alignas(fl::max_align<Key, T>::value) Entry {
        Key key;
        T value;
        void swap(Entry &other) {
            fl::swap(key, other.key);
            fl::swap(value, other.value);
        }
    };

    static fl::size next_power_of_two(fl::size n) {
        fl::size p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }
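    // For example, next_power_of_two(5) == 8 and next_power_of_two(8) == 8.
    // Keeping the capacity a power of two lets the probing code below reduce
    // indices with `idx & (capacity - 1)` instead of a modulo.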

    pair<fl::size, bool> find_slot(const Key &key) const {
        const fl::size cap = _buckets.size();
        const fl::size mask = cap - 1;
        const fl::size h = _hash(key) & mask;
        fl::size first_tomb = npos();

        if (cap <= 8) {
            // linear probing
            for (fl::size i = 0; i < cap; ++i) {
                const fl::size idx = (h + i) & mask;

                if (is_empty(idx))
                    return {first_tomb != npos() ? first_tomb : idx, true};
                if (is_deleted(idx)) {
                    if (first_tomb == npos())
                        first_tomb = idx;
                } else if (is_occupied(idx) && _equal(_buckets[idx].key, key)) {
                    return {idx, false};
                }
            }
        } else {
            // quadratic probing up to 8 tries
            fl::size i = 0;
            for (; i < 8; ++i) {
                const fl::size idx = (h + i + i * i) & mask;

                if (is_empty(idx))
                    return {first_tomb != npos() ? first_tomb : idx, true};
                if (is_deleted(idx)) {
                    if (first_tomb == npos())
                        first_tomb = idx;
                } else if (is_occupied(idx) && _equal(_buckets[idx].key, key)) {
                    return {idx, false};
                }
            }
            // fallback to linear for the rest
            for (; i < cap; ++i) {
                const fl::size idx = (h + i) & mask;

                if (is_empty(idx))
                    return {first_tomb != npos() ? first_tomb : idx, true};
                if (is_deleted(idx)) {
                    if (first_tomb == npos())
                        first_tomb = idx;
                } else if (is_occupied(idx) && _equal(_buckets[idx].key, key)) {
                    return {idx, false};
                }
            }
        }

        return {npos(), false};
    }
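    // Probe-sequence example: for i = 0..7 the quadratic step i + i * i visits
    // offsets 0, 2, 6, 12, 20, 30, 42, 56 from the home bucket (each reduced
    // with `& mask`) before the search falls back to linear probing.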

    enum {
        kLinearProbingOnlySize = 8,
        kQuadraticProbingTries = 8,
    };

    fl::size find_index(const Key &key) const {
        const fl::size cap = _buckets.size();
        const fl::size mask = cap - 1;
        const fl::size h = _hash(key) & mask;

        if (cap <= kLinearProbingOnlySize) {
            // linear probing
            for (fl::size i = 0; i < cap; ++i) {
                const fl::size idx = (h + i) & mask;
                if (is_empty(idx))
                    return npos();
                if (is_occupied(idx) && _equal(_buckets[idx].key, key))
                    return idx;
            }
        } else {
            // quadratic probing up to 8 tries
            fl::size i = 0;
            for (; i < kQuadraticProbingTries; ++i) {
                const fl::size idx = (h + i + i * i) & mask;
                if (is_empty(idx))
                    return npos();
                if (is_occupied(idx) && _equal(_buckets[idx].key, key))
                    return idx;
            }
            // fallback to linear for the rest
            for (; i < cap; ++i) {
                const fl::size idx = (h + i) & mask;
                if (is_empty(idx))
                    return npos();
                if (is_occupied(idx) && _equal(_buckets[idx].key, key))
                    return idx;
            }
        }

        return npos();
    }

    fl::size find_unoccupied_index_using_bitset(
        const Key &key, const fl::bitset<1024> &occupied_set) const {
        const fl::size cap = _buckets.size();
        const fl::size mask = cap - 1;
        const fl::size h = _hash(key) & mask;

        if (cap <= kLinearProbingOnlySize) {
            // linear probing
            for (fl::size i = 0; i < cap; ++i) {
                const fl::size idx = (h + i) & mask;
                bool occupied = occupied_set.test(idx);
                if (occupied) {
                    continue;
                }
                return idx;
            }
        } else {
            // quadratic probing up to 8 tries
            fl::size i = 0;
            for (; i < kQuadraticProbingTries; ++i) {
                const fl::size idx = (h + i + i * i) & mask;
                bool occupied = occupied_set.test(idx);
                if (occupied) {
                    continue;
                }
                return idx;
            }
            // fallback to linear for the rest
            for (; i < cap; ++i) {
                const fl::size idx = (h + i) & mask;
                bool occupied = occupied_set.test(idx);
                if (occupied) {
                    continue;
                }
                return idx;
            }
        }
        return npos();
    }

    void rehash(fl::size new_cap) {
        new_cap = next_power_of_two(new_cap);
        fl::vector_inlined<Entry, FASTLED_HASHMAP_INLINED_COUNT> old;
        fl::bitset<1024> old_occupied = _occupied;

        _buckets.swap(old);
        _buckets.clear();
        _buckets.assign(new_cap, Entry{});

        _occupied.reset();
        _occupied.resize(new_cap);
        _deleted.reset();
        _deleted.resize(new_cap);

        _size = _tombstones = 0;

        for (fl::size i = 0; i < old.size(); i++) {
            if (old_occupied.test(i))
                insert(fl::move(old[i].key), fl::move(old[i].value));
        }
    }

    // Rehash the inline buckets without resizing
    FL_DISABLE_WARNING_PUSH
    FL_DISABLE_WARNING_NULL_DEREFERENCE
    void rehash_inline_no_resize() {
        // filter out tombstones and compact
        fl::size cap = _buckets.size();
        fl::size pos = 0;
        // compact live elements to the left.
        for (fl::size i = 0; i < cap; ++i) {
            if (is_occupied(i)) {
                if (pos != i) {
                    _buckets[pos] = _buckets[i];
                    mark_empty(i);
                    mark_occupied(pos);
                }
                ++pos;
            } else if (is_deleted(i)) {
                mark_empty(i);
            }
        }

        fl::bitset<1024> occupied; // Preallocate a bitset of size 1024
        // Swap the components; this happens at most N times. Use the occupied
        // bitset to track which entries have reached their final position in
        // the array rather than merely being copied in.
        fl::optional<Entry> tmp;
        for (fl::size i = 0; i < _size; ++i) {
            const bool already_finished = occupied.test(i);
            if (already_finished) {
                continue;
            }
            auto &e = _buckets[i];
            // FASTLED_ASSERT(e.state == EntryState::Occupied,
            //                "HashMap::rehash_inline_no_resize: invalid state");

            fl::size idx = find_unoccupied_index_using_bitset(e.key, occupied);
            if (idx == npos()) {
                // no more space
                FASTLED_ASSERT(
                    false, "HashMap::rehash_inline_no_resize: invalid index at "
                               << idx << " which is " << npos());
                return;
            }
            // if idx < pos then we are moving the entry to a new location
            FASTLED_ASSERT(!tmp,
                           "HashMap::rehash_inline_no_resize: invalid tmp");
            if (idx >= _size) {
                // directly copy it now
                _buckets[idx] = e;
                continue;
            }
            tmp = e;
            occupied.set(idx);
            _buckets[idx] = *tmp.ptr();
            while (!tmp.empty()) {
                // we have to find a place for tmp:
                // find a new position for it.
                auto key = tmp.ptr()->key;
                fl::size new_idx =
                    find_unoccupied_index_using_bitset(key, occupied);
                if (new_idx == npos()) {
                    // no more space
                    FASTLED_ASSERT(
                        false,
                        "HashMap::rehash_inline_no_resize: invalid index at "
                            << new_idx << " which is " << npos());
                    return;
                }
                occupied.set(new_idx);
                if (new_idx < _size) {
                    // we have to swap the entry at new_idx with tmp
                    fl::optional<Entry> tmp2 = _buckets[new_idx];
                    _buckets[new_idx] = *tmp.ptr();
                    tmp = tmp2;
                } else {
                    // we can just move tmp to new_idx
                    _buckets[new_idx] = *tmp.ptr();
                    tmp.reset();
                }
            }
            FASTLED_ASSERT(
                occupied.test(i),
                "HashMap::rehash_inline_no_resize: invalid occupied at " << i);
            FASTLED_ASSERT(
                tmp.empty(), "HashMap::rehash_inline_no_resize: invalid tmp at " << i);
        }
        // Reset tombstones count since we've cleared all deleted entries
        _tombstones = 0;
    }
    FL_DISABLE_WARNING_POP

    fl::vector_inlined<Entry, FASTLED_HASHMAP_INLINED_COUNT> _buckets;
    fl::size _size;
    fl::size _tombstones;
    u8 mLoadFactor;
    fl::bitset<1024> _occupied;
    fl::bitset<1024> _deleted;
    Hash _hash;
    KeyEqual _equal;
};

// begin using declarations for stl compatibility
template <typename T> using equal_to = EqualTo<T>;

template <typename Key, typename T, typename Hash = Hash<Key>,
          typename KeyEqual = equal_to<Key>>
using hash_map = HashMap<Key, T, Hash, KeyEqual>;

template <typename Key, typename T, typename Hash = Hash<Key>,
          typename KeyEqual = equal_to<Key>>
using unordered_map = HashMap<Key, T, Hash, KeyEqual>;
// end using declarations for stl compatibility

} // namespace fl
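A minimal usage sketch (illustrative only, not part of hash_map.h): it exercises the insert / operator[] / find_value / remove / iteration API declared above. The key and value types, the initial capacity of 16, and the function name hash_map_example are arbitrary choices for the example.

    #include "fl/hash_map.h"

    void hash_map_example() {
        fl::HashMap<int, int> counts(16);     // bucket count is rounded up to a power of two

        counts.insert(1, 10);                 // insert or overwrite
        counts[2] = 20;                       // operator[] default-constructs missing values
        counts[2] += 5;

        if (int *v = counts.find_value(1)) {  // pointer to the value, nullptr if absent
            *v += 1;
        }

        counts.remove(1);                     // returns true if the key was present

        for (auto it = counts.begin(); it != counts.end(); ++it) {
            int key = (*it).first;            // the iterator yields pair<const Key, T>
            int value = (*it).second;
            (void)key;
            (void)value;
        }
    }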