Eli's Blog

1. LRU

Least Recently Used: a cache that evicts the entry which has gone unused for the longest time. Its key properties are listed below; a minimal sketch of the usual structure (hash map + doubly linked list) follows the list.

  • Fast access to cached entries by key
  • A fixed maximum capacity, so the cache never grows without bound
  • Once the capacity is reached, inserting a new entry first evicts the least recently used one
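Before the hand-rolled version below, here is a minimal sketch of the same idea built on Go's standard container/list package (not from the original post; the names lru, pair and newLRU are purely illustrative):

package main

import (
	"container/list"
	"fmt"
)

type pair struct {
	key string
	val interface{}
}

type lru struct {
	cap   int
	ll    *list.List               // front = most recently used, back = least recently used
	items map[string]*list.Element // key -> element in ll
}

func newLRU(cap int) *lru {
	return &lru{cap: cap, ll: list.New(), items: make(map[string]*list.Element)}
}

func (c *lru) Get(key string) interface{} {
	if el, ok := c.items[key]; ok {
		c.ll.MoveToFront(el) // mark as most recently used
		return el.Value.(pair).val
	}
	return nil
}

func (c *lru) Put(key string, val interface{}) {
	if el, ok := c.items[key]; ok {
		el.Value = pair{key, val}
		c.ll.MoveToFront(el)
		return
	}
	c.items[key] = c.ll.PushFront(pair{key, val})
	if c.ll.Len() > c.cap {
		oldest := c.ll.Back() // least recently used entry
		c.ll.Remove(oldest)
		delete(c.items, oldest.Value.(pair).key)
	}
}

func main() {
	c := newLRU(2)
	c.Put("1", "Golang")
	c.Put("2", "Python")
	c.Get("1")              // "1" becomes most recently used
	c.Put("3", "Java")      // evicts "2"
	fmt.Println(c.Get("2")) // nil
	fmt.Println(c.Get("3")) // Java
}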

1.1 Example code

package main

import (
	"fmt"
	"sync"
)

type Entry struct {
	Key   string
	Value interface{}
	pre   *Entry
	next  *Entry
}

type Cache struct {
	cache    map[string]*Entry
	capacity int
	head     *Entry
	tail     *Entry
}

func newCache(cap int) *Cache {
	return &Cache{cache: make(map[string]*Entry), capacity: cap}
}

var lock sync.RWMutex

func (cache *Cache) Put(key string, val interface{}) interface{} {
	lock.Lock()
	defer lock.Unlock()

	// Key already cached: update the value and mark it as most recently used.
	if existVal, exist := cache.cache[key]; exist {
		existVal.Value = val
		cache.moveToHead(existVal)
		return nil
	}

	// Insert the new entry at the head of the list.
	e := &Entry{Key: key, Value: val, next: cache.head}
	if cache.head != nil {
		cache.head.pre = e
	}
	cache.head = e

	// On the very first insertion, tail is still nil.
	if cache.tail == nil {
		cache.tail = e
	}

	cache.cache[key] = e

	if len(cache.cache) <= cache.capacity {
		return nil
	}

	// Over capacity: evict the least recently used entry (the tail).
	removedEntry := cache.tail
	cache.tail = cache.tail.pre
	removedEntry.pre = nil
	cache.tail.next = nil

	delete(cache.cache, removedEntry.Key)
	return removedEntry.Value
}

func (cache *Cache) Get(key string) interface{} {
	lock.Lock()
	defer lock.Unlock()

	if existVal, exist := cache.cache[key]; exist {
		cache.moveToHead(existVal)
		return existVal.Value
	}

	return nil
}

func (cache *Cache) moveToHead(e *Entry) {
	if e == cache.head {
		return
	}

	// Unlink e and reconnect its neighbours.
	e.pre.next = e.next
	if e == cache.tail {
		cache.tail = e.pre
	} else {
		e.next.pre = e.pre
	}

	e.pre = nil
	e.next = cache.head
	cache.head.pre = e
	cache.head = e
}

func main() {
	cache := newCache(2)

	cache.Put("1", "Golang")
	fmt.Println(cache.Get("1")) // Golang

	cache.Put("2", "Python")
	fmt.Println(cache.Get("1")) // Golang, "1" becomes most recently used

	cache.Put("3", "Java")      // evicts "2"
	fmt.Println(cache.Get("1")) // Golang

	fmt.Println(cache.Get("2")) // nil
	fmt.Println(cache.Get("3")) // Java
}
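Pairing a hash map with a doubly linked list is what keeps both Get and Put at O(1): the map locates a node by key in constant time, and the list turns "move to head" and "evict from tail" into a few pointer updates. Because this version tracks head and tail directly, it has to special-case an empty list; the version in 1.2 avoids those nil checks by using sentinel (dummy) head and tail nodes.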

1.2 Complete code

type LinkNode struct {
	key, val  int
	pre, next *LinkNode
}

type LRUCache struct {
	m          map[int]*LinkNode
	cap        int
	head, tail *LinkNode
}

func Constructor(capacity int) LRUCache {
	// Sentinel head and tail nodes remove the nil checks needed in the version above.
	head := &LinkNode{0, 0, nil, nil}
	tail := &LinkNode{0, 0, nil, nil}
	head.next = tail
	tail.pre = head
	return LRUCache{make(map[int]*LinkNode), capacity, head, tail}
}

func (this *LRUCache) Get(key int) int {
	cache := this.m
	if v, exist := cache[key]; exist {
		this.MoveToHead(v)
		return v.val
	} else {
		return -1
	}
}

func (this *LRUCache) RemoveNode(node *LinkNode) {
	node.pre.next = node.next
	node.next.pre = node.pre
}

// AddNode inserts node right after the sentinel head (most recently used position).
func (this *LRUCache) AddNode(node *LinkNode) {
	head := this.head
	node.next = head.next
	head.next.pre = node
	node.pre = head
	head.next = node
}

func (this *LRUCache) MoveToHead(node *LinkNode) {
	this.RemoveNode(node)
	this.AddNode(node)
}

func (this *LRUCache) Put(key int, value int) {
	tail := this.tail
	cache := this.m
	if v, exist := cache[key]; exist {
		v.val = value
		this.MoveToHead(v)
	} else {
		v := &LinkNode{key, value, nil, nil}
		// At capacity: evict the node just before the sentinel tail.
		if len(cache) == this.cap {
			delete(cache, tail.pre.key)
			this.RemoveNode(tail.pre)
		}
		this.AddNode(v)
		cache[key] = v
	}
}
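A quick, hypothetical driver (not part of the original solution; it assumes the code above sits in a main package with fmt imported) exercises this version:

func main() {
	cache := Constructor(2)
	cache.Put(1, 1)
	cache.Put(2, 2)
	fmt.Println(cache.Get(1)) // 1
	cache.Put(3, 3)           // evicts key 2, the least recently used
	fmt.Println(cache.Get(2)) // -1
	cache.Put(4, 4)           // evicts key 1
	fmt.Println(cache.Get(1)) // -1
	fmt.Println(cache.Get(3)) // 3
	fmt.Println(cache.Get(4)) // 4
}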

2. LFU

LFU (Least Frequently Used) evicts data based on its historical access frequency; the core assumption is that data accessed many times in the past is likely to be accessed again. Each cached block carries a reference count; blocks are ordered by that count, and blocks with the same count are ordered by recency. LFU therefore has to track access history for every entry (higher memory overhead) and keep entries ordered by count (higher CPU overhead), so a naive implementation is considerably more complex and expensive than LRU.
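The implementation below sidesteps the sorting cost: it keeps one doubly linked list per access count plus a minFreq marker, so both Get and Put stay O(1). Within a bucket, ties are broken by recency, and the least recently used node sits just before that bucket's tail.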

package main

// LFUCache keeps one doubly linked list per access frequency,
// plus minFreq, so Get and Put both run in O(1).
type LFUCache struct {
	cache               map[int]*Node       // key -> node
	freq                map[int]*DoubleList // access count -> nodes with that count
	ncap, size, minFreq int
}

// IncrFreq moves node from its current frequency bucket to the next one.
func (this *LFUCache) IncrFreq(node *Node) {
	_freq := node.freq
	this.freq[_freq].RemoveNode(node)
	if this.minFreq == _freq && this.freq[_freq].IsEmpty() {
		this.minFreq++
		delete(this.freq, _freq)
	}
	node.freq++

	if this.freq[node.freq] == nil {
		this.freq[node.freq] = createDL()
	}
	this.freq[node.freq].AddFirst(node)
}

func Constructor(capacity int) LFUCache {
	return LFUCache{
		cache: make(map[int]*Node),
		freq:  make(map[int]*DoubleList),
		ncap:  capacity,
	}
}

func (this *LFUCache) Get(key int) int {
	if node, ok := this.cache[key]; ok {
		this.IncrFreq(node)
		return node.val
	}

	return -1
}

func (this *LFUCache) Put(key int, value int) {
	if this.ncap == 0 {
		return
	}
	// Key already cached: update the value and bump its frequency.
	if node, ok := this.cache[key]; ok {
		node.val = value
		this.IncrFreq(node)
	} else {
		// At capacity: evict the least recently used node of the lowest frequency.
		if this.size >= this.ncap {
			node := this.freq[this.minFreq].RemoveLast()
			delete(this.cache, node.key)
			this.size--
		}
		x := &Node{key: key, val: value, freq: 1}
		this.cache[key] = x
		if this.freq[1] == nil {
			this.freq[1] = createDL()
		}
		this.freq[1].AddFirst(x)
		this.minFreq = 1
		this.size++
	}
}

// Node is a doubly linked list node carrying the key, value and access count.
type Node struct {
	key, val, freq int
	prev, next     *Node
}

// DoubleList is a doubly linked list with sentinel head and tail nodes.
type DoubleList struct {
	tail, head *Node
}

// createDL builds an empty list whose sentinels point at each other.
func createDL() *DoubleList {
	head, tail := &Node{}, &Node{}
	head.next, tail.prev = tail, head

	return &DoubleList{
		tail: tail,
		head: head,
	}
}

func (this *DoubleList) IsEmpty() bool {
	return this.head.next == this.tail
}

// AddFirst inserts node right after the sentinel head (most recently used position).
func (this *DoubleList) AddFirst(node *Node) {
	node.next = this.head.next
	node.prev = this.head

	this.head.next.prev = node
	this.head.next = node
}

func (this *DoubleList) RemoveNode(node *Node) {
	node.next.prev = node.prev
	node.prev.next = node.next

	node.next = nil
	node.prev = nil
}

// RemoveLast unlinks and returns the node just before the sentinel tail.
func (this *DoubleList) RemoveLast() *Node {
	if this.IsEmpty() {
		return nil
	}

	lastNode := this.tail.prev
	this.RemoveNode(lastNode)

	return lastNode
}
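As with the LRU version, a hypothetical driver (assuming fmt is imported alongside the code above) reproduces the classic capacity-2 trace:

func main() {
	cache := Constructor(2)
	cache.Put(1, 1)
	cache.Put(2, 2)
	fmt.Println(cache.Get(1)) // 1; key 1 now has frequency 2
	cache.Put(3, 3)           // evicts key 2 (lowest frequency)
	fmt.Println(cache.Get(2)) // -1
	fmt.Println(cache.Get(3)) // 3; key 3 now has frequency 2
	cache.Put(4, 4)           // evicts key 1 (same frequency as 3, but less recently used)
	fmt.Println(cache.Get(1)) // -1
	fmt.Println(cache.Get(3)) // 3
	fmt.Println(cache.Get(4)) // 4
}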
