histogram.go

// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"fmt"
	"math"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	dto "github.com/prometheus/client_model/go"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// nativeHistogramBounds for the frac of observed values. Only relevant for
// schema > 0. The position in the slice is the schema. (0 is never used, just
// here for convenience of using the schema directly as the index.)
//
// TODO(beorn7): Currently, we do a binary search into these slices. There are
// ways to turn it into a small number of simple array lookups. It probably only
// matters for schema 5 and beyond, but should be investigated. See this comment
// as a starting point:
// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
var nativeHistogramBounds = [][]float64{
	// Schema "0":
	{0.5},
	// Schema 1:
	{0.5, 0.7071067811865475},
	// Schema 2:
	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
	// Schema 3:
	{
		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
	},
	// Schema 4:
	{
		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
	},
	// Schema 5:
	{
		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
	},
	// Schema 6:
	{
		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
	},
	// Schema 7:
	{
		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
	},
	// Schema 8:
	{
		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
	},
}

// The nativeHistogramBounds above can be generated with the code below.
//
// TODO(beorn7): It's tempting to actually use `go generate` to generate the
// code above. However, this could lead to slightly different numbers on
// different architectures. We still need to come to terms if we are fine with
// that, or if we might prefer to specify precise numbers in the standard.
//
//	var nativeHistogramBounds [][]float64 = make([][]float64, 9)
//
//	func init() {
//		// Populate nativeHistogramBounds.
//		numBuckets := 1
//		for i := range nativeHistogramBounds {
//			bounds := []float64{0.5}
//			factor := math.Exp2(math.Exp2(float64(-i)))
//			for j := 0; j < numBuckets-1; j++ {
//				var bound float64
//				if (j+1)%2 == 0 {
//					// Use previously calculated value for increased precision.
//					bound = nativeHistogramBounds[i-1][j/2+1]
//				} else {
//					bound = bounds[j] * factor
//				}
//				bounds = append(bounds, bound)
//			}
//			numBuckets *= 2
//			nativeHistogramBounds[i] = bounds
//		}
//	}

// A Histogram counts individual observations from an event or sample stream in
// configurable static buckets (or in dynamic sparse buckets as part of the
// experimental Native Histograms, see below for more details). Similar to a
// Summary, it also provides a sum of observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
// the histogram_quantile PromQL function.
//
// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
// (see the documentation for detailed procedures). However, Histograms require
// the user to pre-define suitable buckets, and they are in general less
// accurate. (Both problems are addressed by the experimental Native
// Histograms. To use them, configure a NativeHistogramBucketFactor in the
// HistogramOpts. They also require a Prometheus server v2.40+ with the
// corresponding feature flag enabled.)
//
// The Observe method of a Histogram has a very low performance overhead in
// comparison with the Observe method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
	Metric
	Collector

	// Observe adds a single observation to the histogram. Observations are
	// usually positive or zero. Negative observations are accepted but
	// prevent current versions of Prometheus from properly detecting
	// counter resets in the sum of observations. (The experimental Native
	// Histograms handle negative observations properly.) See
	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
	// for details.
	Observe(float64)
}
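
// A minimal usage sketch from a consumer of this package (the metric name,
// help string, and observed values below are placeholders, not part of this
// file's API): create the Histogram, register it, and record observations.
//
//	reqDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "http_request_duration_seconds", // hypothetical metric name
//		Help:    "Duration of HTTP requests in seconds.",
//		Buckets: prometheus.DefBuckets, // upper bounds from 5ms to 10s
//	})
//	prometheus.MustRegister(reqDuration)
//
//	start := time.Now()
//	// ... handle a request ...
//	reqDuration.Observe(time.Since(start).Seconds())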

// bucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"

// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}

// DefNativeHistogramZeroThreshold is the default value for
// NativeHistogramZeroThreshold in the HistogramOpts.
//
// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
// which is a bucket boundary at all possible resolutions.
const DefNativeHistogramZeroThreshold = 2.938735877055719e-39

// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
// bucket that only receives observations of precisely zero.
const NativeHistogramZeroThresholdZero = -1

var errBucketLabelNotAllowed = fmt.Errorf(
	"%q is not allowed as label name in histograms", bucketLabel,
)

// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
// counted and not included in the returned slice. The returned slice is meant
// to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
	if count < 1 {
		panic("LinearBuckets needs a positive count")
	}
	buckets := make([]float64, count)
	for i := range buckets {
		buckets[i] = start
		start += width
	}
	return buckets
}
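
// For illustration, a small example of the slice LinearBuckets returns (the
// concrete arguments are arbitrary, not recommended values):
//
//	// Ten upper bounds 20, 25, 30, ..., 65:
//	buckets := LinearBuckets(20, 5, 10)
//	// buckets == []float64{20, 25, 30, 35, 40, 45, 50, 55, 60, 65}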

// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
// has an upper bound of 'start' and each following bucket's upper bound is
// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
// not counted and not included in the returned slice. The returned slice is
// meant to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal to 1.
func ExponentialBuckets(start, factor float64, count int) []float64 {
	if count < 1 {
		panic("ExponentialBuckets needs a positive count")
	}
	if start <= 0 {
		panic("ExponentialBuckets needs a positive start value")
	}
	if factor <= 1 {
		panic("ExponentialBuckets needs a factor greater than 1")
	}
	buckets := make([]float64, count)
	for i := range buckets {
		buckets[i] = start
		start *= factor
	}
	return buckets
}
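
// As a quick example (arbitrary arguments chosen for illustration):
//
//	// Five upper bounds 1, 2, 4, 8, 16: start=1, factor=2, count=5.
//	buckets := ExponentialBuckets(1, 2, 5)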

// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
func ExponentialBucketsRange(min, max float64, count int) []float64 {
	if count < 1 {
		panic("ExponentialBucketsRange count needs a positive count")
	}
	if min <= 0 {
		panic("ExponentialBucketsRange min needs to be greater than 0")
	}

	// Formula for exponential buckets.
	// max = min*growthFactor^(bucketCount-1)
	// We know max/min and highest bucket. Solve for growthFactor.
	growthFactor := math.Pow(max/min, 1.0/float64(count-1))

	// Now that we know growthFactor, solve for each bucket.
	buckets := make([]float64, count)
	for i := 1; i <= count; i++ {
		buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
	}
	return buckets
}
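
// A worked example of the formula above (the arguments are arbitrary): for
// min=0.1, max=10, count=5, growthFactor = (10/0.1)^(1/4) ≈ 3.162, so the
// returned upper bounds are approximately
//
//	ExponentialBucketsRange(0.1, 10, 5) // ≈ {0.1, 0.316, 1, 3.16, 10}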

// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Histogram (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Histogram must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Histogram.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels Labels

	// Buckets defines the buckets into which observations are counted. Each
	// element in the slice is the upper inclusive bound of a bucket. The
	// values must be sorted in strictly increasing order. There is no need
	// to add a highest bucket with +Inf bound, it will be added
	// implicitly. If Buckets is left as nil or set to a slice of length
	// zero, it is replaced by default buckets. The default buckets are
	// DefBuckets if no buckets for a native histogram (see below) are used,
	// otherwise the default is no buckets. (In other words, if you want to
	// use both regular buckets and buckets for a native histogram, you have
	// to define the regular buckets here explicitly.)
	Buckets []float64

	// If NativeHistogramBucketFactor is greater than one, so-called sparse
	// buckets are used (in addition to the regular buckets, if defined
	// above). A Histogram with sparse buckets will be ingested as a Native
	// Histogram by a Prometheus server with that feature enabled (requires
	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
	// the whole float64 range (with the exception of the “zero” bucket, see
	// NativeHistogramZeroThreshold below). From any one bucket to the next,
	// the width of the bucket grows by a constant
	// factor. NativeHistogramBucketFactor provides an upper bound for this
	// factor (exception see below). The smaller
	// NativeHistogramBucketFactor, the more buckets will be used and thus
	// the more costly the histogram will become. A generally good trade-off
	// between cost and accuracy is a value of 1.1 (each bucket is at most
	// 10% wider than the previous one), which will result in each power of
	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
	// and 2, same as between 2 and 4, and 4 and 8, etc.).
	//
	// Details about the actually used factor: The factor is calculated as
	// 2^(2^-n), where n is an integer number between (and including) -4 and
	// 8. n is chosen so that the resulting factor is the largest that is
	// still smaller or equal to NativeHistogramBucketFactor. Note that the
	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
	// it is larger than the provided NativeHistogramBucketFactor.
	//
	// NOTE: Native Histograms are still an experimental feature. Their
	// behavior might still change without a major version
	// bump. Subsequently, all NativeHistogram... options here might still
	// change their behavior or name (or might completely disappear) without
	// a major version bump.
	NativeHistogramBucketFactor float64

	// All observations with an absolute value of less or equal
	// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
	// For best results, this should be close to a bucket boundary. This is
	// usually the case if picking a power of two. If
	// NativeHistogramZeroThreshold is left at zero,
	// DefNativeHistogramZeroThreshold is used as the threshold. To
	// configure a zero bucket with an actual threshold of zero (i.e. only
	// observations of precisely zero will go into the zero bucket), set
	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
	// constant (or any negative float value).
	NativeHistogramZeroThreshold float64

	// The remaining fields define a strategy to limit the number of
	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
	// at zero, the number of buckets is not limited. (Note that this might
	// lead to unbounded memory consumption if the values observed by the
	// Histogram are sufficiently wide-spread. In particular, this could be
	// used as a DoS attack vector. Where the observed values depend on
	// external inputs, it is highly recommended to set a
	// NativeHistogramMaxBucketNumber.) Once the set
	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
	// enacted:
	//  - First, if the last reset (or the creation) of the histogram is at
	//    least NativeHistogramMinResetDuration ago, then the whole
	//    histogram is reset to its initial state (including regular
	//    buckets).
	//  - If less time has passed, or if NativeHistogramMinResetDuration is
	//    zero, no reset is performed. Instead, the zero threshold is
	//    increased sufficiently to reduce the number of buckets to or below
	//    NativeHistogramMaxBucketNumber, but not to more than
	//    NativeHistogramMaxZeroThreshold. Thus, if
	//    NativeHistogramMaxZeroThreshold is already at or below the current
	//    zero threshold, nothing happens at this step.
	//  - After that, if the number of buckets still exceeds
	//    NativeHistogramMaxBucketNumber, the resolution of the histogram is
	//    reduced by doubling the width of the sparse buckets (up to a
	//    growth factor from one bucket to the next of 2^(2^4) = 65536,
	//    see above).
	//  - Any increased zero threshold or reduced resolution is reset back
	//    to their original values once NativeHistogramMinResetDuration has
	//    passed (since the last reset or the creation of the histogram).
	NativeHistogramMaxBucketNumber  uint32
	NativeHistogramMinResetDuration time.Duration
	NativeHistogramMaxZeroThreshold float64

	// now is for testing purposes, by default it's time.Now.
	now func() time.Time

	// afterFunc is for testing purposes, by default it's time.AfterFunc.
	afterFunc func(time.Duration, func()) *time.Timer
}
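
// A rough illustration of the native-histogram related fields (the metric
// name and the concrete limits are placeholders, not recommendations):
//
//	h := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name: "rpc_latency_seconds", // hypothetical metric name
//		Help: "RPC latency in seconds.",
//		// Factor 1.1 is rounded down to the nearest supported factor,
//		// 2^(2^-3) ≈ 1.0905, i.e. 8 buckets per power of two.
//		NativeHistogramBucketFactor:     1.1,
//		NativeHistogramMaxBucketNumber:  100,
//		NativeHistogramMinResetDuration: time.Hour,
//	})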

// HistogramVecOpts bundles the options to create a HistogramVec metric.
// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type HistogramVecOpts struct {
	HistogramOpts

	// VariableLabels are used to partition the metric vector by the given set
	// of labels. Each label value will be constrained with the optional Constraint
	// function, if provided.
	VariableLabels ConstrainableLabels
}

// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
// The returned implementation also implements ExemplarObserver. It is safe to
// perform the corresponding type assertion. Exemplars are tracked separately
// for each bucket.
func NewHistogram(opts HistogramOpts) Histogram {
	return newHistogram(
		NewDesc(
			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
			opts.Help,
			nil,
			opts.ConstLabels,
		),
		opts,
	)
}

func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
	if len(desc.variableLabels.names) != len(labelValues) {
		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
	}

	for _, n := range desc.variableLabels.names {
		if n == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}

	if opts.now == nil {
		opts.now = time.Now
	}
	if opts.afterFunc == nil {
		opts.afterFunc = time.AfterFunc
	}

	h := &histogram{
		desc:                            desc,
		upperBounds:                     opts.Buckets,
		labelPairs:                      MakeLabelPairs(desc, labelValues),
		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
		lastResetTime:                   opts.now(),
		now:                             opts.now,
		afterFunc:                       opts.afterFunc,
	}
	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
		h.upperBounds = DefBuckets
	}
	if opts.NativeHistogramBucketFactor <= 1 {
		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
	} else {
		switch {
		case opts.NativeHistogramZeroThreshold > 0:
			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
		case opts.NativeHistogramZeroThreshold == 0:
			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
	}
	for i, upperBound := range h.upperBounds {
		if i < len(h.upperBounds)-1 {
			if upperBound >= h.upperBounds[i+1] {
				panic(fmt.Errorf(
					"histogram buckets must be in increasing order: %f >= %f",
					upperBound, h.upperBounds[i+1],
				))
			}
		} else {
			if math.IsInf(upperBound, +1) {
				// The +Inf bucket is implicit. Remove it here.
				h.upperBounds = h.upperBounds[:i]
			}
		}
	}
	// Finally we know the final length of h.upperBounds and can make buckets
	// for both counts as well as exemplars:
	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)

	h.init(h) // Init self-collection.
	return h
}

type histogramCounts struct {
	// Order in this struct matters for the alignment required by atomic
	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG

	// sumBits contains the bits of the float64 representing the sum of all
	// observations.
	sumBits uint64
	count   uint64

	// nativeHistogramZeroBucket counts all (positive and negative)
	// observations in the zero bucket (with an absolute value less or equal
	// the current threshold, see next field).
	nativeHistogramZeroBucket uint64
	// nativeHistogramZeroThresholdBits is the bit pattern of the current
	// threshold for the zero bucket. It's initially equal to
	// nativeHistogramZeroThreshold but may change according to the bucket
	// count limitation strategy.
	nativeHistogramZeroThresholdBits uint64
	// nativeHistogramSchema may change over time according to the bucket
	// count limitation strategy and therefore has to be saved here.
	nativeHistogramSchema int32
	// Number of (positive and negative) sparse buckets.
	nativeHistogramBucketsNumber uint32

	// Regular buckets.
	buckets []uint64

	// The sparse buckets for native histograms are implemented with a
	// sync.Map for now. A dedicated data structure will likely be more
	// efficient. There are separate maps for negative and positive
	// observations. The map's value is an *int64, counting observations in
	// that bucket. (Note that we don't use uint64 as an int64 won't
	// overflow in practice, and working with signed numbers from the
	// beginning simplifies the handling of deltas.) The map's key is the
	// index of the bucket according to the used
	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
}

// observe manages the parts of observe that only affect
// histogramCounts. doSparse is true if sparse buckets should be done,
// too.
func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
	if bucket < len(hc.buckets) {
		atomic.AddUint64(&hc.buckets[bucket], 1)
	}
	atomicAddFloat(&hc.sumBits, v)
	if doSparse && !math.IsNaN(v) {
		var (
			key                  int
			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
			bucketCreated, isInf bool
		)
		if math.IsInf(v, 0) {
			// Pretend v is MaxFloat64 but later increment key by one.
			if math.IsInf(v, +1) {
				v = math.MaxFloat64
			} else {
				v = -math.MaxFloat64
			}
			isInf = true
		}
		frac, exp := math.Frexp(math.Abs(v))
		if schema > 0 {
			bounds := nativeHistogramBounds[schema]
			key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
		} else {
			key = exp
			if frac == 0.5 {
				key--
			}
			offset := (1 << -schema) - 1
			key = (key + offset) >> -schema
		}
		if isInf {
			key++
		}
		switch {
		case v > zeroThreshold:
			bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
		case v < -zeroThreshold:
			bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
		default:
			atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
		}
		if bucketCreated {
			atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
		}
	}
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hc.count, 1)
}
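
// To make the key computation above concrete, a worked example (the value and
// schema are arbitrary): for v = 1.5 and schema = 3, math.Frexp(1.5) yields
// frac = 0.75 and exp = 1. With bounds = nativeHistogramBounds[3] (8 entries),
// sort.SearchFloat64s(bounds, 0.75) returns 5, so
//
//	key = 5 + (1-1)*8 = 5
//
// which denotes the bucket (2^(4/8), 2^(5/8)] ≈ (1.414, 1.542], and indeed
// 1.414 < 1.5 <= 1.542. Key 0 corresponds to an upper bound of 2^0 = 1, in
// line with the histogramCounts documentation above.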

type histogram struct {
	// countAndHotIdx enables lock-free writes with use of atomic updates.
	// The most significant bit is the hot index [0 or 1] of the count field
	// below. Observe calls update the hot one. All remaining bits count the
	// number of Observe calls. Observe starts by incrementing this counter,
	// and finishes by incrementing the count field in the respective
	// histogramCounts, as a marker for completion.
	//
	// Calls of the Write method (which are non-mutating reads from the
	// perspective of the histogram) swap the hot–cold under the mtx
	// lock. A cooldown is awaited (while locked) by comparing the number of
	// observations with the initiation count. Once they match, the
	// last observation on the now cool one has completed. All cold fields must
	// be merged into the new hot before releasing mtx.
	//
	// Fields with atomic access first! See alignment constraint:
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64

	selfCollector
	desc *Desc

	// Only used in the Write method and for sparse bucket management.
	mtx sync.Mutex

	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*histogramCounts

	upperBounds                     []float64
	labelPairs                      []*dto.LabelPair
	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
	nativeHistogramMaxZeroThreshold float64
	nativeHistogramMaxBuckets       uint32
	nativeHistogramMinResetDuration time.Duration
	// lastResetTime is protected by mtx. It is also used as created timestamp.
	lastResetTime time.Time
	// resetScheduled is protected by mtx. It is true if a reset is
	// scheduled for a later time (when nativeHistogramMinResetDuration has
	// passed).
	resetScheduled bool

	// now is for testing purposes, by default it's time.Now.
	now func() time.Time

	// afterFunc is for testing purposes, by default it's time.AfterFunc.
	afterFunc func(time.Duration, func()) *time.Timer
}
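
// The bit arithmetic on countAndHotIdx is easiest to see in a small sketch,
// mirroring the expressions used in the methods below (variable names chosen
// for illustration only):
//
//	n := atomic.AddUint64(&h.countAndHotIdx, 1) // an Observe call
//	hotIdx := n >> 63                           // top bit selects counts[0] or counts[1]
//	count := n & ((1 << 63) - 1)                // lower 63 bits: number of started observations
//
//	// Write flips the hot index without disturbing the counter:
//	n = atomic.AddUint64(&h.countAndHotIdx, 1<<63)
//	coldIdx := (^n) >> 63 // complement of the new hot index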

func (h *histogram) Desc() *Desc {
	return h.desc
}

func (h *histogram) Observe(v float64) {
	h.observe(v, h.findBucket(v))
}

func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
	i := h.findBucket(v)
	h.observe(v, i)
	h.updateExemplar(v, i, e)
}
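
// Attaching an exemplar goes through the ExemplarObserver type assertion
// mentioned in the NewHistogram documentation. A minimal sketch (the metric
// name, label name, and trace ID are placeholders):
//
//	h := prometheus.NewHistogram(prometheus.HistogramOpts{Name: "example_seconds"})
//	if eo, ok := h.(prometheus.ExemplarObserver); ok {
//		eo.ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
//	}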

func (h *histogram) Write(out *dto.Metric) error {
	// For simplicity, we protect this whole method by a mutex. It is not in
	// the hot path, i.e. Observe is called much more often than Write. The
	// complication of making Write lock-free isn't worth it, if possible at
	// all.
	h.mtx.Lock()
	defer h.mtx.Unlock()

	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
	// without touching the count bits. See the struct comments for a full
	// description of the algorithm.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	// count is contained unchanged in the lower 63 bits.
	count := n & ((1 << 63) - 1)
	// The most significant bit tells us which counts is hot. The complement
	// is thus the cold one.
	hotCounts := h.counts[n>>63]
	coldCounts := h.counts[(^n)>>63]

	waitForCooldown(count, coldCounts)

	his := &dto.Histogram{
		Bucket:           make([]*dto.Bucket, len(h.upperBounds)),
		SampleCount:      proto.Uint64(count),
		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
		CreatedTimestamp: timestamppb.New(h.lastResetTime),
	}
	out.Histogram = his
	out.Label = h.labelPairs

	var cumCount uint64
	for i, upperBound := range h.upperBounds {
		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
		his.Bucket[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(cumCount),
			UpperBound:      proto.Float64(upperBound),
		}
		if e := h.exemplars[i].Load(); e != nil {
			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
		}
	}
	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
		b := &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(math.Inf(1)),
			Exemplar:        e.(*dto.Exemplar),
		}
		his.Bucket = append(his.Bucket, b)
	}
	if h.nativeHistogramSchema > math.MinInt32 {
		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)

		defer func() {
			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
		}()

		his.ZeroCount = proto.Uint64(zeroBucket)
		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)

		// Add a no-op span to a histogram without observations and with
		// a zero threshold of zero. Otherwise, a native histogram would
		// look like a classic histogram to scrapers.
		if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
			his.PositiveSpan = []*dto.BucketSpan{{
				Offset: proto.Int32(0),
				Length: proto.Uint32(0),
			}}
		}
	}
	addAndResetCounts(hotCounts, coldCounts)
	return nil
}

// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
	// slightly faster than the binary search. If we really care, we could
	// switch from one search strategy to the other depending on the number
	// of buckets.
	//
	// Microbenchmarks (BenchmarkHistogramNoLabels):
	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
	return sort.SearchFloat64s(h.upperBounds, v)
}

// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
	// Do not add to sparse buckets for NaN observations.
	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
	// We increment h.countAndHotIdx so that the counter in the lower
	// 63 bits gets incremented. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1)
	hotCounts := h.counts[n>>63]
	hotCounts.observe(v, bucket, doSparse)
	if doSparse {
		h.limitBuckets(hotCounts, v, bucket)
	}
}

// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
// by a concurrently happening Write call).
func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
	if h.nativeHistogramMaxBuckets == 0 {
		return // No limit configured.
	}
	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
		return // Bucket limit not exceeded yet.
	}

	h.mtx.Lock()
	defer h.mtx.Unlock()

	// The hot counts might have been swapped just before we acquired the
	// lock. Re-fetch the hot counts first...
	n := atomic.LoadUint64(&h.countAndHotIdx)
	hotIdx := n >> 63
	coldIdx := (^n) >> 63
	hotCounts := h.counts[hotIdx]
	coldCounts := h.counts[coldIdx]
	// ...and then check again if we really have to reduce the bucket count.
	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
		return // Bucket limit not exceeded after all.
	}

	// Try the various strategies in order.
	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
		return
	}
	// One of the other strategies will happen. To undo what they will do as
	// soon as enough time has passed to satisfy
	// h.nativeHistogramMinResetDuration, schedule a reset at the right time
	// if we haven't done so already.
	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
		h.resetScheduled = true
		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
	}

	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
		return
	}
	h.doubleBucketWidth(hotCounts, coldCounts)
}

// maybeReset resets the whole histogram if at least
// h.nativeHistogramMinResetDuration has passed. It returns true if the
// histogram has been reset. The caller must have locked h.mtx.
func (h *histogram) maybeReset(
	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
) bool {
	// We are using the possibly mocked h.now() rather than
	// time.Since(h.lastResetTime) to enable testing.
	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
		h.resetScheduled || // Do not interfere if a reset is already scheduled.
		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
		return false
	}
	// Completely reset coldCounts.
	h.resetCounts(cold)
	// Repeat the latest observation to not lose it completely.
	cold.observe(value, bucket, true)
	// Make coldCounts the new hot counts while resetting countAndHotIdx.
	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
	count := n & ((1 << 63) - 1)
	waitForCooldown(count, hot)
	// Finally, reset the formerly hot counts, too.
	h.resetCounts(hot)
	h.lastResetTime = h.now()
	return true
}

// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
// called without having locked h.mtx.
func (h *histogram) reset() {
	h.mtx.Lock()
	defer h.mtx.Unlock()

	n := atomic.LoadUint64(&h.countAndHotIdx)
	hotIdx := n >> 63
	coldIdx := (^n) >> 63
	hot := h.counts[hotIdx]
	cold := h.counts[coldIdx]
	// Completely reset coldCounts.
	h.resetCounts(cold)
	// Make coldCounts the new hot counts while resetting countAndHotIdx.
	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
	count := n & ((1 << 63) - 1)
	waitForCooldown(count, hot)
	// Finally, reset the formerly hot counts, too.
	h.resetCounts(hot)
	h.lastResetTime = h.now()
	h.resetScheduled = false
}

// maybeWidenZeroBucket widens the zero bucket until it includes the existing
// buckets closest to the zero bucket (which could be two, if an equidistant
// negative and a positive bucket exists, but usually it's only one bucket to be
// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
// limits how far the zero bucket can be extended, and if that's not enough to
// include an existing bucket, the method returns false. The caller must have
// locked h.mtx.
func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
		return false
	}
	// Find the key of the bucket closest to zero.
	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
	if smallestNegativeKey < smallestKey {
		smallestKey = smallestNegativeKey
	}
	if smallestKey == math.MaxInt32 {
		return false
	}
	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
		return false // New threshold would exceed the max threshold.
	}
	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
	// Remove applicable buckets.
	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
	}
	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
	}
	// Make cold counts the new hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	// Swap the pointer names to represent the new roles and make
	// the rest less confusing.
	hot, cold = cold, hot
	waitForCooldown(count, cold)
	// Add all the now cold counts to the new hot counts...
	addAndResetCounts(hot, cold)
	// ...adjust the new zero threshold in the cold counts, too...
	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
	// ...and then merge the newly deleted buckets into the wider zero
	// bucket.
	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
		return func(k, v interface{}) bool {
			key := k.(int)
			bucket := v.(*int64)
			if key == smallestKey {
				// Merge into hot zero bucket...
				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
				// ...and delete from cold counts.
				coldBuckets.Delete(key)
				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
			} else {
				// Add to corresponding hot bucket...
				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
				}
				// ...and reset cold bucket.
				atomic.StoreInt64(bucket, 0)
			}
			return true
		}
	}

	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
	return true
}

// doubleBucketWidth doubles the bucket width (by decrementing the schema
// number). Note that very sparse buckets could lead to a low reduction of the
// bucket count (or even no reduction at all). The method does nothing if the
// schema is already -4.
func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
	if coldSchema == -4 {
		return // Already at lowest resolution.
	}
	coldSchema--
	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
	// Play it simple and just delete all cold buckets.
	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
	// Make coldCounts the new hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	// Swap the pointer names to represent the new roles and make
	// the rest less confusing.
	hot, cold = cold, hot
	waitForCooldown(count, cold)
	// Add all the now cold counts to the new hot counts...
	addAndResetCounts(hot, cold)
	// ...adjust the schema in the cold counts, too...
	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
	// ...and then merge the cold buckets into the wider hot buckets.
	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
		return func(k, v interface{}) bool {
			key := k.(int)
			bucket := v.(*int64)
			// Adjust key to match the bucket to merge into.
			if key > 0 {
				key++
			}
			key /= 2
			// Add to corresponding hot bucket.
			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
			}
			return true
		}
	}
	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
	// Play it simple again and just delete all cold buckets.
	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
}
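
// A worked example of the key adjustment in the merge closure above
// (illustrative numbers): "if key > 0 { key++ }; key /= 2" amounts to
// newKey = ceil(oldKey/2), which picks the twice-as-wide bucket containing the
// old one. Going from schema 1 to schema 0:
//
//	old key  3, bucket (2, 2^1.5]      -> new key 2, bucket (2, 4]
//	old key  4, bucket (2^1.5, 4]      -> new key 2, bucket (2, 4]
//	old key -1, bucket (0.5, 2^-0.5]   -> new key 0, bucket (0.5, 1]
//
// Two neighboring old buckets therefore collapse into one bucket of twice the
// width, which is what halves (at best) the sparse bucket count.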

func (h *histogram) resetCounts(counts *histogramCounts) {
	atomic.StoreUint64(&counts.sumBits, 0)
	atomic.StoreUint64(&counts.count, 0)
	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
	for i := range h.upperBounds {
		atomic.StoreUint64(&counts.buckets[i], 0)
	}
	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}

// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
	if l == nil {
		return
	}
	e, err := newExemplar(v, h.now(), l)
	if err != nil {
		panic(err)
	}
	h.exemplars[bucket].Store(e)
}

// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
	*MetricVec
}

// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
	return V2.NewHistogramVec(HistogramVecOpts{
		HistogramOpts:  opts,
		VariableLabels: UnconstrainedLabels(labelNames),
	})
}
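
// A minimal usage sketch (the metric name, help text, and label values below
// are illustrative, not prescribed by this package):
//
//	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
//		Name:    "http_request_duration_seconds",
//		Help:    "HTTP request latencies in seconds.",
//		Buckets: prometheus.DefBuckets,
//	}, []string{"code", "method"})
//	prometheus.MustRegister(reqDur)
//	reqDur.WithLabelValues("200", "GET").Observe(0.042)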

// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
	desc := V2.NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		opts.VariableLabels,
		opts.ConstLabels,
	)
	return &HistogramVec{
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
			return newHistogram(desc, opts.HistogramOpts, lvs...)
		}),
	}
}

// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
// create the new Histogram but leave it at its starting value, a Histogram without
// any observations.
//
// Keeping the Histogram for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
// Histogram will still exist, but it will not be exported anymore, even if a
// Histogram with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Observer), err
	}
	return nil, err
}
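
// A sketch of the error-returning variant, reusing the hypothetical reqDur
// vector from the example further above:
//
//	o, err := reqDur.GetMetricWithLabelValues("200", "GET")
//	if err != nil {
//		// e.g. wrong number of label values supplied
//		return err
//	}
//	o.Observe(0.042)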

// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
	metric, err := v.MetricVec.GetMetricWith(labels)
	if metric != nil {
		return metric.(Observer), err
	}
	return nil, err
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
//
//	myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
	h, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return h
}

// With works as GetMetricWith but panics where GetMetricWith would have
// returned an error. Not returning an error allows shortcuts like
//
//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
	h, err := v.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return h
}

// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence – which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the HistogramVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
	vec, err := v.MetricVec.CurryWith(labels)
	if vec != nil {
		return &HistogramVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}
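
// A currying sketch (again using the hypothetical reqDur vector from the
// earlier example): pre-setting the "method" label leaves only "code" to be
// supplied later, and both forms address the same underlying series.
//
//	getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
//	getOnly.WithLabelValues("200").Observe(0.017) // Same series as reqDur.WithLabelValues("200", "GET").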

type constHistogram struct {
	desc       *Desc
	count      uint64
	sum        float64
	buckets    map[float64]uint64
	labelPairs []*dto.LabelPair
	createdTs  *timestamppb.Timestamp
}

func (h *constHistogram) Desc() *Desc {
	return h.desc
}

func (h *constHistogram) Write(out *dto.Metric) error {
	his := &dto.Histogram{
		CreatedTimestamp: h.createdTs,
	}
	buckets := make([]*dto.Bucket, 0, len(h.buckets))

	his.SampleCount = proto.Uint64(h.count)
	his.SampleSum = proto.Float64(h.sum)
	for upperBound, count := range h.buckets {
		buckets = append(buckets, &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(upperBound),
		})
	}

	if len(buckets) > 0 {
		sort.Sort(buckSort(buckets))
	}
	his.Bucket = buckets

	out.Histogram = his
	out.Label = h.labelPairs

	return nil
}

// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) (Metric, error) {
	if desc.err != nil {
		return nil, desc.err
	}
	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
		return nil, err
	}
	return &constHistogram{
		desc:       desc,
		count:      count,
		sum:        sum,
		buckets:    buckets,
		labelPairs: MakeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) Metric {
	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
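
// A sketch of how a custom Collector might emit a constant histogram from its
// Collect method (desc, ch, and all numbers are placeholders for values the
// collector would supply; the bucket counts must be cumulative and not exceed
// the total count):
//
//	ch <- prometheus.MustNewConstHistogram(
//		desc,
//		4711,  // count
//		403.4, // sum
//		map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
//	)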

type buckSort []*dto.Bucket

func (s buckSort) Len() int {
	return len(s)
}

func (s buckSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s buckSort) Less(i, j int) bool {
	return s[i].GetUpperBound() < s[j].GetUpperBound()
}

// pickSchema returns the smallest number n between -4 and 8 such that
// 2^(2^-n) is less than or equal to the provided bucketFactor, i.e. the
// coarsest resolution whose per-bucket growth factor does not exceed
// bucketFactor.
//
// Special cases:
//   - bucketFactor <= 1: panics.
//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
func pickSchema(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
	}
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8
	case floor >= 4:
		return -4
	default:
		return -int32(floor)
	}
}
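
// A worked example (numbers illustrative): for a bucketFactor of 1.1,
// log2(log2(1.1)) ≈ -2.86, so floor is -3 and the returned schema is 3.
// Buckets then grow by a factor of 2^(2^-3) ≈ 1.09 per bucket, the closest
// native-histogram resolution that does not exceed the requested factor.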

func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
	var ii []int
	buckets.Range(func(k, v interface{}) bool {
		ii = append(ii, k.(int))
		return true
	})
	sort.Ints(ii)

	if len(ii) == 0 {
		return nil, nil
	}

	var (
		spans     []*dto.BucketSpan
		deltas    []int64
		prevCount int64
		nextI     int
	)

	appendDelta := func(count int64) {
		*spans[len(spans)-1].Length++
		deltas = append(deltas, count-prevCount)
		prevCount = count
	}

	for n, i := range ii {
		v, _ := buckets.Load(i)
		count := atomic.LoadInt64(v.(*int64))
		// Multiple spans with only small gaps in between are probably
		// encoded more efficiently as one larger span with a few empty
		// buckets. Needs some research to find the sweet spot. For now,
		// we assume that gaps of one or two buckets should not create
		// a new span.
		iDelta := int32(i - nextI)
		if n == 0 || iDelta > 2 {
			// We have to create a new span, either because we are
			// at the very beginning, or because we have found a gap
			// of more than two buckets.
			spans = append(spans, &dto.BucketSpan{
				Offset: proto.Int32(iDelta),
				Length: proto.Uint32(0),
			})
		} else {
			// We have found a small gap (or no gap at all).
			// Insert empty buckets as needed.
			for j := int32(0); j < iDelta; j++ {
				appendDelta(0)
			}
		}
		appendDelta(count)
		nextI = i + 1
	}
	return spans, deltas
}
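
// A worked encoding example (values invented for illustration): populated keys
// 1, 2, and 5 with counts 3, 1, and 2 produce a single span, because the gap
// between keys 2 and 5 is only two buckets and gets filled with zeros:
//
//	spans:  [{Offset: 1, Length: 5}]
//	deltas: [3, -2, -1, 0, 2]   // counts 3, 1, 0, 0, 2 encoded as differences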

// addToBucket increments the sparse bucket at key by the provided amount. It
// returns true if a new sparse bucket had to be created for that.
func addToBucket(buckets *sync.Map, key int, increment int64) bool {
	if existingBucket, ok := buckets.Load(key); ok {
		// Fast path without allocation.
		atomic.AddInt64(existingBucket.(*int64), increment)
		return false
	}
	// Bucket doesn't exist yet. Slow path allocating new counter.
	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
		// The bucket was created concurrently in another goroutine.
		// Have to increment after all.
		atomic.AddInt64(actualBucket.(*int64), increment)
		return false
	}
	return true
}

// addAndReset returns a function to be used with sync.Map.Range of sparse
// buckets in coldCounts. It increments the buckets in the provided hotBuckets
// according to the buckets ranged through. It then resets all buckets ranged
// through to 0 (but leaves them in place so that they don't need to get
// recreated on the next scrape).
func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
	return func(k, v interface{}) bool {
		bucket := v.(*int64)
		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
			atomic.AddUint32(bucketNumber, 1)
		}
		atomic.StoreInt64(bucket, 0)
		return true
	}
}

func deleteSyncMap(m *sync.Map) {
	m.Range(func(k, v interface{}) bool {
		m.Delete(k)
		return true
	})
}

func findSmallestKey(m *sync.Map) int {
	result := math.MaxInt32
	m.Range(func(k, v interface{}) bool {
		key := k.(int)
		if key < result {
			result = key
		}
		return true
	})
	return result
}

func getLe(key int, schema int32) float64 {
	// Here a bit of context about the behavior for the last bucket counting
	// regular numbers (called simply "last bucket" below) and the bucket
	// counting observations of ±Inf (called "inf bucket" below, with a key
	// one higher than that of the "last bucket"):
	//
	// If we apply the usual formula to the last bucket, its upper bound
	// would be calculated as +Inf. The reason is that the max possible
	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
	// the calculated bucket boundaries. So the calculated boundary has to
	// be larger than math.MaxFloat64, and the only float64 larger than
	// math.MaxFloat64 is +Inf. However, we want to count actual
	// observations of ±Inf in the inf bucket. Therefore, we have to treat
	// the upper bound of the last bucket specially and set it to
	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
	// being one higher than that of the last bucket, naturally comes out as
	// +Inf by the usual formula. So that's fine.)
	//
	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
	// 1024. If there were a float64 number following math.MaxFloat64, it
	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
	// of 0.5 and an exp of 1025. However, since frac must be smaller than
	// 1, and exp must be smaller than 1025, either representation overflows
	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
	// largest possible float64. Q.E.D.) However, the formula for
	// calculating the upper bound from the idx and schema of the last
	// bucket results in precisely that. It is either frac=1.0 & exp=1024
	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >= 0). (This is,
	// by the way, a power of two where the exponent itself is a power of
	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
	// schemas.) So these are the special cases we have to catch below.
	if schema < 0 {
		exp := key << -schema
		if exp == 1024 {
			// This is the last bucket before the overflow bucket
			// (for ±Inf observations). Return math.MaxFloat64 as
			// explained above.
			return math.MaxFloat64
		}
		return math.Ldexp(1, exp)
	}

	fracIdx := key & ((1 << schema) - 1)
	frac := nativeHistogramBounds[schema][fracIdx]
	exp := (key >> schema) + 1
	if frac == 0.5 && exp == 1025 {
		// This is the last bucket before the overflow bucket (for ±Inf
		// observations). Return math.MaxFloat64 as explained above.
		return math.MaxFloat64
	}
	return math.Ldexp(frac, exp)
}
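
// A numeric sanity check (illustrative): at schema -1 each bucket spans a
// factor of four, so getLe(2, -1) computes exp = 2<<1 = 4 and returns
// Ldexp(1, 4) = 16, the upper bound of the (4, 16] bucket. In general the
// upper bound of key k is 2^(k * 2^-schema), except for the special
// math.MaxFloat64 cases handled above.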

// waitForCooldown returns after the count field in the provided histogramCounts
// has reached the provided count value.
func waitForCooldown(count uint64, counts *histogramCounts) {
	for count != atomic.LoadUint64(&counts.count) {
		runtime.Gosched() // Let observations get work done.
	}
}

// atomicAddFloat adds the provided float atomically to another float
// represented by the bit pattern the bits pointer is pointing to.
func atomicAddFloat(bits *uint64, v float64) {
	for {
		loadedBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
			break
		}
	}
}

// atomicDecUint32 atomically decrements the uint32 p points to. See
// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
func atomicDecUint32(p *uint32) {
	atomic.AddUint32(p, ^uint32(0))
}
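
// Adding ^uint32(0) (all bits set, the two's-complement representation of -1)
// wraps around and is equivalent to subtracting 1; a value of 5 becomes 4, for
// example. This is the decrement pattern documented for atomic.AddUint32.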

// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
// bucket) from the cold counts to the corresponding fields in the hot
// counts. Those fields are then reset to 0 in the cold counts.
func addAndResetCounts(hot, cold *histogramCounts) {
	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
	atomic.StoreUint64(&cold.count, 0)
	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
	atomicAddFloat(&hot.sumBits, coldSum)
	atomic.StoreUint64(&cold.sumBits, 0)
	for i := range hot.buckets {
		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
		atomic.StoreUint64(&cold.buckets[i], 0)
	}
	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}