// Package cache provides an in-memory LRU cache that is safe for
// concurrent access.
package cache

import (
	"container/list"
	"sync"
	"sync/atomic"
)
  8. // An AtomicInt is an int64 to be accessed atomically.
  9. type AtomicInt int64
  10. // MemCache is an LRU cache. It is safe for concurrent access.
  11. type MemCache struct {
  12. mutex sync.RWMutex
  13. maxItemSize int
  14. cacheList *list.List
  15. cache map[interface{}]*list.Element
  16. hits, gets AtomicInt
  17. }
  18. type entry struct {
  19. key interface{}
  20. value interface{}
  21. }
  22. // NewMemCache If maxItemSize is zero, the cache has no limit.
  23. //if maxItemSize is not zero, when cache's size beyond maxItemSize,start to swap
  24. func NewMemCache(maxItemSize int) *MemCache {
  25. return &MemCache{
  26. maxItemSize: maxItemSize,
  27. cacheList: list.New(),
  28. cache: make(map[interface{}]*list.Element),
  29. }
  30. }
  31. // Status return the status of cache
  32. func (c *MemCache) Status() *CacheStatus {
  33. c.mutex.RLock()
  34. defer c.mutex.RUnlock()
  35. return &CacheStatus{
  36. MaxItemSize: c.maxItemSize,
  37. CurrentSize: c.cacheList.Len(),
  38. Gets: c.gets.Get(),
  39. Hits: c.hits.Get(),
  40. }
  41. }
  42. //Get value with key
  43. func (c *MemCache) Get(key string) (interface{}, bool) {
  44. c.mutex.RLock()
  45. defer c.mutex.RUnlock()
  46. c.gets.Add(1)
  47. if ele, hit := c.cache[key]; hit {
  48. c.hits.Add(1)
  49. return ele.Value.(*entry).value, true
  50. }
  51. return nil, false
  52. }
  53. //Set a value with key
  54. func (c *MemCache) Set(key string, value interface{}) {
  55. c.mutex.Lock()
  56. defer c.mutex.Unlock()
  57. if c.cache == nil {
  58. c.cache = make(map[interface{}]*list.Element)
  59. c.cacheList = list.New()
  60. }
  61. if ele, ok := c.cache[key]; ok {
  62. c.cacheList.MoveToFront(ele)
  63. ele.Value.(*entry).value = value
  64. return
  65. }
  66. ele := c.cacheList.PushFront(&entry{key: key, value: value})
  67. c.cache[key] = ele
  68. if c.maxItemSize != 0 && c.cacheList.Len() > c.maxItemSize {
  69. c.RemoveOldest()
  70. }
  71. }
  72. // Delete a value with key
  73. func (c *MemCache) Delete(key string) {
  74. c.mutex.Lock()
  75. defer c.mutex.Unlock()
  76. if c.cache == nil {
  77. return
  78. }
  79. if ele, ok := c.cache[key]; ok {
  80. c.cacheList.Remove(ele)
  81. key := ele.Value.(*entry).key
  82. delete(c.cache, key)
  83. return
  84. }
  85. }
  86. // RemoveOldest remove oldest key
  87. func (c *MemCache) RemoveOldest() {
  88. if c.cache == nil {
  89. return
  90. }
  91. ele := c.cacheList.Back()
  92. if ele != nil {
  93. c.cacheList.Remove(ele)
  94. key := ele.Value.(*entry).key
  95. delete(c.cache, key)
  96. }
  97. }
  98. // Add atomically adds n to i.
  99. func (i *AtomicInt) Add(n int64) {
  100. atomic.AddInt64((*int64)(i), n)
  101. }
  102. // Get atomically gets the value of i.
  103. func (i *AtomicInt) Get() int64 {
  104. return atomic.LoadInt64((*int64)(i))
  105. }