// memcache.go
  1. package cache
  2. import (
  3. "container/list"
  4. "sync"
  5. "sync/atomic"
  6. //"fmt"
  7. )
  8. // An AtomicInt is an int64 to be accessed atomically.
  9. type AtomicInt int64
  10. // MemCache is an LRU cache. It is safe for concurrent access.
  11. type MemCache struct {
  12. mutex sync.RWMutex
  13. maxItemSize int
  14. cacheList *list.List
  15. cache map[interface{}]*list.Element
  16. hits, gets AtomicInt
  17. }
  18. type entry struct {
  19. key interface{}
  20. value interface{}
  21. }
  22. // If maxItemSize is zero, the cache has no limit.
  23. //if maxItemSize is not zero, when cache's size beyond maxItemSize,start to swap
  24. func NewMemCache(maxItemSize int) *MemCache {
  25. return &MemCache{
  26. maxItemSize: maxItemSize,
  27. cacheList: list.New(),
  28. cache: make(map[interface{}]*list.Element),
  29. }
  30. }
  31. //return the status of cache
  32. func (c *MemCache) Status() *CacheStatus{
  33. c.mutex.RLock()
  34. defer c.mutex.RUnlock()
  35. return &CacheStatus{
  36. MaxItemSize: c.maxItemSize,
  37. CurrentSize: c.cacheList.Len(),
  38. Gets: c.gets.Get(),
  39. Hits: c.hits.Get(),
  40. }
  41. }
  42. //get value with key
  43. func (c *MemCache) Get(key string) (interface{}, bool) {
  44. c.mutex.RLock()
  45. defer c.mutex.RUnlock()
  46. c.gets.Add(1)
  47. if ele, hit := c.cache[key]; hit {
  48. c.hits.Add(1)
  49. return ele.Value.(*entry).value, true
  50. }
  51. return nil, false
  52. }
  53. //set a value with key
  54. func (c *MemCache) Set(key string, value interface{}) {
  55. c.mutex.Lock()
  56. defer c.mutex.Unlock()
  57. if c.cache == nil {
  58. c.cache = make(map[interface{}]*list.Element)
  59. c.cacheList = list.New()
  60. }
  61. if ele, ok := c.cache[key]; ok {
  62. c.cacheList.MoveToFront(ele)
  63. ele.Value.(*entry).value = value
  64. return
  65. }
  66. ele := c.cacheList.PushFront(&entry{key: key, value: value})
  67. c.cache[key] = ele
  68. if c.maxItemSize != 0 && c.cacheList.Len() > c.maxItemSize {
  69. c.RemoveOldest()
  70. }
  71. }
  72. func (c *MemCache) Delete(key string) {
  73. c.mutex.Lock()
  74. defer c.mutex.Unlock()
  75. if c.cache == nil {
  76. return
  77. }
  78. if ele, ok := c.cache[key]; ok {
  79. c.cacheList.Remove(ele)
  80. key := ele.Value.(*entry).key
  81. delete(c.cache, key)
  82. return
  83. }
  84. }
  85. func (c *MemCache) RemoveOldest() {
  86. if c.cache == nil {
  87. return
  88. }
  89. ele := c.cacheList.Back()
  90. if ele != nil {
  91. c.cacheList.Remove(ele)
  92. key := ele.Value.(*entry).key
  93. delete(c.cache, key)
  94. }
  95. }
  96. // Add atomically adds n to i.
  97. func (i *AtomicInt) Add(n int64) {
  98. atomic.AddInt64((*int64)(i), n)
  99. }
  100. // Get atomically gets the value of i.
  101. func (i *AtomicInt) Get() int64 {
  102. return atomic.LoadInt64((*int64)(i))
  103. }