// memcache.go
// Package cache provides a simple in-memory LRU cache.
package cache

import (
	"container/list"
	"sync"
	"sync/atomic"
)

// An AtomicInt is an int64 to be accessed atomically.
// NOTE(review): on Go 1.19+ this could be atomic.Int64, but that would
// change the exported type, so it is left as-is for compatibility.
type AtomicInt int64
// MemCache is an LRU cache. It is safe for concurrent access.
type MemCache struct {
	hits, gets  AtomicInt                     // hit / lookup counters, updated atomically
	mutex       sync.RWMutex                  // guards cacheList and cache
	maxItemSize int                           // 0 means unlimited; otherwise eviction threshold
	cacheList   *list.List                    // recency order: front = most recently set
	cache       map[interface{}]*list.Element // key -> element in cacheList
}
// entry is the payload stored in each list element. The key is kept
// alongside the value so eviction can delete the map entry from the
// list element alone.
type entry struct {
	key   interface{}
	value interface{}
}
  21. // NewMemCache If maxItemSize is zero, the cache has no limit.
  22. //if maxItemSize is not zero, when cache's size beyond maxItemSize,start to swap
  23. func NewMemCache(maxItemSize int) *MemCache {
  24. return &MemCache{
  25. maxItemSize: maxItemSize,
  26. cacheList: list.New(),
  27. cache: make(map[interface{}]*list.Element),
  28. hits: 0,
  29. gets: 0,
  30. }
  31. }
  32. // Status return the status of cache
  33. func (c *MemCache) Status() *CacheStatus {
  34. c.mutex.RLock()
  35. defer c.mutex.RUnlock()
  36. return &CacheStatus{
  37. MaxItemSize: c.maxItemSize,
  38. CurrentSize: c.cacheList.Len(),
  39. Gets: c.gets.Get(),
  40. Hits: c.hits.Get(),
  41. }
  42. }
  43. //Get value with key
  44. func (c *MemCache) Get(key string) (interface{}, bool) {
  45. c.mutex.RLock()
  46. defer c.mutex.RUnlock()
  47. c.gets.Add(1)
  48. if ele, hit := c.cache[key]; hit {
  49. c.hits.Add(1)
  50. return ele.Value.(*entry).value, true
  51. }
  52. return nil, false
  53. }
  54. //Set a value with key
  55. func (c *MemCache) Set(key string, value interface{}) {
  56. c.mutex.Lock()
  57. defer c.mutex.Unlock()
  58. if c.cache == nil {
  59. c.cache = make(map[interface{}]*list.Element)
  60. c.cacheList = list.New()
  61. }
  62. if ele, ok := c.cache[key]; ok {
  63. c.cacheList.MoveToFront(ele)
  64. ele.Value.(*entry).value = value
  65. return
  66. }
  67. ele := c.cacheList.PushFront(&entry{key: key, value: value})
  68. c.cache[key] = ele
  69. if c.maxItemSize != 0 && c.cacheList.Len() > c.maxItemSize {
  70. c.RemoveOldest()
  71. }
  72. }
  73. // Delete a value with key
  74. func (c *MemCache) Delete(key string) {
  75. c.mutex.Lock()
  76. defer c.mutex.Unlock()
  77. if c.cache == nil {
  78. return
  79. }
  80. if ele, ok := c.cache[key]; ok {
  81. c.cacheList.Remove(ele)
  82. key := ele.Value.(*entry).key
  83. delete(c.cache, key)
  84. return
  85. }
  86. }
// RemoveOldest evicts the entry at the back of the recency list and
// deletes its key from the map. It is a no-op on an uninitialized cache.
//
// NOTE(review): this exported method takes no lock itself. Set calls it
// while already holding c.mutex (so locking here would deadlock), but
// calling RemoveOldest directly from other goroutines is unsynchronized
// — confirm that external callers provide their own exclusion.
func (c *MemCache) RemoveOldest() {
	if c.cache == nil {
		return
	}
	ele := c.cacheList.Back()
	if ele != nil {
		c.cacheList.Remove(ele)
		// The element carries its key so the map entry can be removed too.
		key := ele.Value.(*entry).key
		delete(c.cache, key)
	}
}
// Add atomically adds n to i.
func (i *AtomicInt) Add(n int64) {
	atomic.AddInt64((*int64)(i), n)
}
// Get atomically loads and returns the current value of i.
func (i *AtomicInt) Get() int64 {
	return atomic.LoadInt64((*int64)(i))
}