432. All O`one Data Structure
At a Glance
- Topic: system-design-coding
- Pattern: Doubly linked list of count buckets + hash maps (adjacent to 460. LFU / 146. LRU thinking — frequency isolation + ordering)
- Difficulty: Hard
- Companies: Google, Amazon, Meta, Snap, DoorDash
- Frequency: Low
- LeetCode: 432
Problem (one-liner)
Design a structure supporting inc(key), dec(key), getMaxKey(), and getMinKey() — all O(1) average time — where keys are strings and counts stay nonnegative (dec removes key at count 0).
Recognition Cues
- Global min and max frequency at any time — needs ordered buckets of keys sharing the same count
- Increment/decrement must migrate keys between adjacent count levels without scanning all keys
- Strong hint: DLL of frequency buckets + `key -> count` map + `count -> *countBucket` map
Diagram
At-a-glance flow (replace with problem-specific Mermaid as you refine this note). camelCase node IDs; no spaces in IDs.
Loading diagram…
Approaches
- Single hash count + heap — heap updates on every `inc`/`dec` — O(log n) per op.
- Sorted map of counts (tree map) — same O(log n) bottleneck.
- Optimal — DLL whose nodes hold `count` and a set of keys at that count; splice nodes as count levels appear/disappear.
Optimal Solution
Data structure layout (read this first)
- `keyToCount`: `string -> int` — current frequency for every live key.
- `countToBucket`: `int -> *countBucket` — locates the DLL node for a frequency value in O(1).
- `countBucket`: contains `count int`, `keys map[string]struct{}`, and `prev`/`next` pointers forming a doubly linked list of distinct count values sorted ascending (head = global minimum frequency among nonempty buckets, tail = maximum).
- `inc`/`dec`: move a key from bucket `c` to `c±1`, creating or deleting bucket nodes when a count level empties.
- `getMinKey`/`getMaxKey`: pick any key from the head bucket’s set or the tail bucket’s set.
HEAD ⇄ [count=1: {a,c}] ⇄ [count=3: {b}] ⇄ [count=7: {x,z}] ⇄ TAIL
Go
package main
// countBucket is one node in the ascending doubly linked list: a single
// distinct frequency value plus the set of keys currently at that frequency.
type countBucket struct {
	count int                 // frequency this bucket represents (0 for the sentinels)
	keys  map[string]struct{} // keys whose current count equals count
	prev  *countBucket        // neighbor holding the next-lower count (or head sentinel)
	next  *countBucket        // neighbor holding the next-higher count (or tail sentinel)
}
// AllOne supports Inc, Dec, GetMinKey, and GetMaxKey in O(1) average time.
// head/tail are sentinel buckets; real buckets between them stay sorted by count.
type AllOne struct {
	head        *countBucket         // sentinel before the minimum-count bucket
	tail        *countBucket         // sentinel after the maximum-count bucket
	keyToCount  map[string]int       // current count for every live key
	countToNode map[int]*countBucket // count value -> its bucket in the DLL
}
// Constructor returns an empty AllOne whose head/tail sentinels are linked
// directly to each other (no live buckets yet).
func Constructor() AllOne {
	sentinelHead := &countBucket{keys: map[string]struct{}{}}
	sentinelTail := &countBucket{keys: map[string]struct{}{}}
	sentinelHead.next = sentinelTail
	sentinelTail.prev = sentinelHead
	structure := AllOne{
		head:        sentinelHead,
		tail:        sentinelTail,
		keyToCount:  map[string]int{},
		countToNode: map[int]*countBucket{},
	}
	return structure
}
// unlinkBucket splices an emptied bucket out of the DLL and removes its count
// from the lookup map. The caller guarantees the bucket holds no keys.
func (structure *AllOne) unlinkBucket(node *countBucket) {
	delete(structure.countToNode, node.count)
	before, after := node.prev, node.next
	before.next = after
	after.prev = before
}
// spliceAfter inserts a brand-new bucket for count immediately after anchor,
// seeded with key, and registers it in countToNode. anchor must be the bucket
// (or sentinel) whose count is the largest one below count, preserving order.
func (structure *AllOne) spliceAfter(anchor *countBucket, count int, key string) {
	follower := anchor.next
	fresh := &countBucket{
		count: count,
		keys:  map[string]struct{}{key: {}},
		prev:  anchor,
		next:  follower,
	}
	anchor.next = fresh
	follower.prev = fresh
	structure.countToNode[count] = fresh
}
// Inc raises key's count by one, inserting the key at count 1 if absent.
// O(1) average: the destination level is always adjacent to the source level.
func (structure *AllOne) Inc(key string) {
	oldCount := structure.keyToCount[key] // 0 when the key is new
	newCount := oldCount + 1
	structure.keyToCount[key] = newCount

	// anchor = bucket after which a fresh newCount bucket would be spliced.
	anchor := structure.head
	if oldCount > 0 {
		current := structure.countToNode[oldCount]
		delete(current.keys, key)
		anchor = current
		if len(current.keys) == 0 {
			// Level emptied: drop it and splice after its predecessor instead.
			anchor = current.prev
			structure.unlinkBucket(current)
		}
	}

	if target, ok := structure.countToNode[newCount]; ok {
		target.keys[key] = struct{}{}
		return
	}
	structure.spliceAfter(anchor, newCount, key)
}
// Dec lowers key's count by one; a key at count 1 is removed entirely.
// O(1) average. Unknown keys are ignored: the original dereferenced a nil
// bucket pointer (countToNode[0] is nil) and panicked in that case.
func (structure *AllOne) Dec(key string) {
	previousCount := structure.keyToCount[key]
	if previousCount == 0 {
		// LeetCode guarantees Dec only targets existing keys, but guard
		// instead of crashing on a nil *countBucket.
		return
	}
	node := structure.countToNode[previousCount]
	delete(node.keys, key)
	// The destination level (previousCount-1) always sits before node, so the
	// anchor is node.prev whether or not node survives.
	anchor := node.prev
	if len(node.keys) == 0 {
		structure.unlinkBucket(node)
	}
	if previousCount == 1 {
		// Count would reach 0: the key leaves the structure entirely.
		delete(structure.keyToCount, key)
		return
	}
	newCount := previousCount - 1
	structure.keyToCount[key] = newCount
	if existing, exists := structure.countToNode[newCount]; exists {
		existing.keys[key] = struct{}{}
		return
	}
	structure.spliceAfter(anchor, newCount, key)
}
// GetMaxKey returns any key holding the highest count, or "" when empty.
func (structure *AllOne) GetMaxKey() string {
	highest := structure.tail.prev
	if highest == structure.head {
		return "" // sentinels adjacent: no live keys
	}
	for candidate := range highest.keys {
		return candidate // any member of the max bucket is a valid answer
	}
	return "" // defensive: non-sentinel buckets are never empty
}
// GetMinKey returns any key holding the lowest count, or "" when the
// structure is empty (head and tail sentinels are adjacent).
func (structure *AllOne) GetMinKey() string {
	bucket := structure.head.next
	if bucket == structure.tail {
		return ""
	}
	// Any member of the minimum bucket is valid; map iteration yields one.
	for word := range bucket.keys {
		return word
	}
	// Defensive: invariants keep non-sentinel buckets nonempty.
	return ""
}JavaScript
/**
 * One node of the ascending doubly linked list: a single distinct count plus
 * the set of keys currently at that count. count 0 is used for the sentinels.
 */
class CountBucket {
  /** @param {number} count - frequency level this bucket represents */
  constructor(count) {
    // prev points toward lower counts, next toward higher counts.
    Object.assign(this, { count, keys: new Set(), prev: null, next: null });
  }
}
/**
 * O(1) min/max frequency tracker: a DLL of count buckets bounded by two
 * sentinels, plus key->count and count->bucket maps.
 */
var AllOne = function () {
  const sentinelHead = new CountBucket(0);
  const sentinelTail = new CountBucket(0);
  sentinelHead.next = sentinelTail;
  sentinelTail.prev = sentinelHead;
  this.head = sentinelHead; // head.next is always the minimum-count bucket
  this.tail = sentinelTail; // tail.prev is always the maximum-count bucket
  this.keyToCount = new Map(); // current count for every live key
  this.countToBucket = new Map(); // count value -> its bucket in the DLL
};
/**
 * Detach an emptied bucket from the DLL and drop its count index entry.
 * Caller guarantees the bucket's key set is empty.
 * @param {CountBucket} bucket
 */
AllOne.prototype._unlink = function (bucket) {
  const { prev, next } = bucket;
  prev.next = next;
  next.prev = prev;
  this.countToBucket.delete(bucket.count);
};
/**
 * Insert a brand-new bucket for `count`, seeded with `key`, immediately after
 * `anchor`, and index it. anchor must hold the largest count below `count`
 * (or be a sentinel) so the list stays sorted ascending.
 * @param {CountBucket} anchor
 * @param {number} count
 * @param {string} key
 */
AllOne.prototype._spliceAfter = function (anchor, count, key) {
  const fresh = new CountBucket(count);
  fresh.keys.add(key);
  const follower = anchor.next;
  fresh.prev = anchor;
  fresh.next = follower;
  anchor.next = fresh;
  follower.prev = fresh;
  this.countToBucket.set(count, fresh);
};
/**
 * Increment key's count by 1, inserting it at count 1 if absent. O(1) average.
 * (Uses ?? rather than || for the default: semantically "missing", not
 * "falsy" — counts of 0 are never stored, but the intent is explicit.)
 * @param {string} key
 */
AllOne.prototype.inc = function (key) {
  const previousCount = this.keyToCount.get(key) ?? 0;
  // anchor = bucket after which a fresh (previousCount + 1) bucket belongs.
  let anchor;
  if (previousCount > 0) {
    const oldBucket = this.countToBucket.get(previousCount);
    oldBucket.keys.delete(key);
    if (oldBucket.keys.size === 0) {
      // Level emptied: unlink it and splice after its predecessor instead.
      anchor = oldBucket.prev;
      this._unlink(oldBucket);
    } else {
      anchor = oldBucket;
    }
  } else {
    anchor = this.head; // new key: count 1 belongs at the minimum end
  }
  const newCount = previousCount + 1;
  this.keyToCount.set(key, newCount);
  const target = this.countToBucket.get(newCount);
  if (target) {
    target.keys.add(key);
    return;
  }
  this._spliceAfter(anchor, newCount, key);
};
/**
 * Decrement key's count by 1; a key at count 1 is removed entirely. O(1)
 * average. Unknown keys are ignored — the original threw a TypeError
 * (`Map.get` returned undefined and `.keys` was dereferenced).
 * @param {string} key
 */
AllOne.prototype.dec = function (key) {
  const previousCount = this.keyToCount.get(key);
  if (previousCount === undefined) {
    // Problem statement guarantees dec only targets existing keys, but guard
    // rather than crash on undefined.
    return;
  }
  const oldBucket = this.countToBucket.get(previousCount);
  oldBucket.keys.delete(key);
  // The destination level (previousCount - 1) always sits before oldBucket,
  // so the anchor is oldBucket.prev whether or not oldBucket survives.
  const anchor = oldBucket.prev;
  if (oldBucket.keys.size === 0) {
    this._unlink(oldBucket);
  }
  if (previousCount === 1) {
    // Count would reach 0: the key leaves the structure entirely.
    this.keyToCount.delete(key);
    return;
  }
  const newCount = previousCount - 1;
  this.keyToCount.set(key, newCount);
  const existing = this.countToBucket.get(newCount);
  if (existing) {
    existing.keys.add(key);
    return;
  }
  this._spliceAfter(anchor, newCount, key);
};
/**
 * Return any key holding the highest count, or '' when empty.
 * @returns {string}
 */
AllOne.prototype.getMaxKey = function () {
  const highest = this.tail.prev;
  if (highest === this.head) {
    return ''; // sentinels adjacent: no live keys
  }
  // Any member of the max bucket is a valid answer; take the first yielded.
  const probe = highest.keys.values().next();
  return probe.done ? '' : probe.value;
};
/**
 * Return any key holding the lowest count, or '' when the structure is
 * empty (head and tail sentinels are adjacent).
 * @returns {string}
 */
AllOne.prototype.getMinKey = function () {
  const bucket = this.head.next;
  if (bucket === this.tail) {
    return '';
  }
  // Any member of the minimum bucket is valid; loop returns the first one.
  for (const word of bucket.keys) {
    return word;
  }
  // Defensive: invariants keep non-sentinel buckets nonempty.
  return '';
};Walkthrough
inc("a"), inc("b"), inc("a"): counts a=2, b=1. Bucket list 1:{b} → 2:{a}; min=b, max=a. dec("a"): a moves to count 1; two keys at 1 — min could be either.
Invariant: DLL spans contiguous count levels present in the structure; each key appears in exactly one bucket matching keyToCount.
Edge Cases
- Empty structure — `getMinKey`/`getMaxKey` return `""`
- Single key — min and max identical
- Many keys share the same count — an arbitrary choice from the set is fine
- `dec` removes the key entirely at count 1
Pitfalls
- Inserting a new count bucket at wrong anchor — breaks sort order of DLL
- Leaving empty buckets attached — min/max walk hits empty sets
- Off-by-one when linking after removed bucket vs before next count
Similar Problems
- 460. LFU Cache — per-key frequency lists + `minFrequency` pointer — Hard sibling
- 146. LRU Cache — DLL hygiene without frequency buckets — foundational Medium
- 347. Top K Frequent Elements — frequency-first thinking — Medium cross-topic
Variants
- Range query — “sum counts in `[low, high]`” → augment buckets with subtree sums or a Fenwick tree over discrete counts
- TTL decay — periodic sweep compressing counts (batch bucket merge)
- Concurrent access — one big lock vs per-bucket locks (livelock hazards on splice)
Mind-Map Tags
#doubly-linked-list #frequency-bucket #hash-map #min-max-tracking #o1-amortized
Last updated on
Spotted something unclear or wrong on this page?