generated_metrics.go 68 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823
  1. // Code generated by mdatagen. DO NOT EDIT.
  2. package metadata
  3. import (
  4. "fmt"
  5. "strconv"
  6. "time"
  7. "go.opentelemetry.io/collector/component"
  8. "go.opentelemetry.io/collector/pdata/pcommon"
  9. "go.opentelemetry.io/collector/pdata/pmetric"
  10. "go.opentelemetry.io/collector/receiver"
  11. )
  12. // AttributeStatusCode specifies the a value status_code attribute.
  13. type AttributeStatusCode int
  14. const (
  15. _ AttributeStatusCode = iota
  16. AttributeStatusCode1xx
  17. AttributeStatusCode2xx
  18. AttributeStatusCode3xx
  19. AttributeStatusCode4xx
  20. AttributeStatusCode5xx
  21. AttributeStatusCodeOther
  22. )
  23. // String returns the string representation of the AttributeStatusCode.
  24. func (av AttributeStatusCode) String() string {
  25. switch av {
  26. case AttributeStatusCode1xx:
  27. return "1xx"
  28. case AttributeStatusCode2xx:
  29. return "2xx"
  30. case AttributeStatusCode3xx:
  31. return "3xx"
  32. case AttributeStatusCode4xx:
  33. return "4xx"
  34. case AttributeStatusCode5xx:
  35. return "5xx"
  36. case AttributeStatusCodeOther:
  37. return "other"
  38. }
  39. return ""
  40. }
  41. // MapAttributeStatusCode is a helper map of string to AttributeStatusCode attribute value.
  42. var MapAttributeStatusCode = map[string]AttributeStatusCode{
  43. "1xx": AttributeStatusCode1xx,
  44. "2xx": AttributeStatusCode2xx,
  45. "3xx": AttributeStatusCode3xx,
  46. "4xx": AttributeStatusCode4xx,
  47. "5xx": AttributeStatusCode5xx,
  48. "other": AttributeStatusCodeOther,
  49. }
  50. type metricHaproxyBytesInput struct {
  51. data pmetric.Metric // data buffer for generated metric.
  52. config MetricConfig // metric config provided by user.
  53. capacity int // max observed number of data points added to the metric.
  54. }
  55. // init fills haproxy.bytes.input metric with initial data.
  56. func (m *metricHaproxyBytesInput) init() {
  57. m.data.SetName("haproxy.bytes.input")
  58. m.data.SetDescription("Bytes in. Corresponds to HAProxy's `bin` metric.")
  59. m.data.SetUnit("by")
  60. m.data.SetEmptySum()
  61. m.data.Sum().SetIsMonotonic(true)
  62. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  63. }
  64. func (m *metricHaproxyBytesInput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  65. if !m.config.Enabled {
  66. return
  67. }
  68. dp := m.data.Sum().DataPoints().AppendEmpty()
  69. dp.SetStartTimestamp(start)
  70. dp.SetTimestamp(ts)
  71. dp.SetIntValue(val)
  72. }
  73. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  74. func (m *metricHaproxyBytesInput) updateCapacity() {
  75. if m.data.Sum().DataPoints().Len() > m.capacity {
  76. m.capacity = m.data.Sum().DataPoints().Len()
  77. }
  78. }
  79. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  80. func (m *metricHaproxyBytesInput) emit(metrics pmetric.MetricSlice) {
  81. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  82. m.updateCapacity()
  83. m.data.MoveTo(metrics.AppendEmpty())
  84. m.init()
  85. }
  86. }
  87. func newMetricHaproxyBytesInput(cfg MetricConfig) metricHaproxyBytesInput {
  88. m := metricHaproxyBytesInput{config: cfg}
  89. if cfg.Enabled {
  90. m.data = pmetric.NewMetric()
  91. m.init()
  92. }
  93. return m
  94. }
  95. type metricHaproxyBytesOutput struct {
  96. data pmetric.Metric // data buffer for generated metric.
  97. config MetricConfig // metric config provided by user.
  98. capacity int // max observed number of data points added to the metric.
  99. }
  100. // init fills haproxy.bytes.output metric with initial data.
  101. func (m *metricHaproxyBytesOutput) init() {
  102. m.data.SetName("haproxy.bytes.output")
  103. m.data.SetDescription("Bytes out. Corresponds to HAProxy's `bout` metric.")
  104. m.data.SetUnit("by")
  105. m.data.SetEmptySum()
  106. m.data.Sum().SetIsMonotonic(true)
  107. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  108. }
  109. func (m *metricHaproxyBytesOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  110. if !m.config.Enabled {
  111. return
  112. }
  113. dp := m.data.Sum().DataPoints().AppendEmpty()
  114. dp.SetStartTimestamp(start)
  115. dp.SetTimestamp(ts)
  116. dp.SetIntValue(val)
  117. }
  118. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  119. func (m *metricHaproxyBytesOutput) updateCapacity() {
  120. if m.data.Sum().DataPoints().Len() > m.capacity {
  121. m.capacity = m.data.Sum().DataPoints().Len()
  122. }
  123. }
  124. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  125. func (m *metricHaproxyBytesOutput) emit(metrics pmetric.MetricSlice) {
  126. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  127. m.updateCapacity()
  128. m.data.MoveTo(metrics.AppendEmpty())
  129. m.init()
  130. }
  131. }
  132. func newMetricHaproxyBytesOutput(cfg MetricConfig) metricHaproxyBytesOutput {
  133. m := metricHaproxyBytesOutput{config: cfg}
  134. if cfg.Enabled {
  135. m.data = pmetric.NewMetric()
  136. m.init()
  137. }
  138. return m
  139. }
  140. type metricHaproxyClientsCanceled struct {
  141. data pmetric.Metric // data buffer for generated metric.
  142. config MetricConfig // metric config provided by user.
  143. capacity int // max observed number of data points added to the metric.
  144. }
  145. // init fills haproxy.clients.canceled metric with initial data.
  146. func (m *metricHaproxyClientsCanceled) init() {
  147. m.data.SetName("haproxy.clients.canceled")
  148. m.data.SetDescription("Number of data transfers aborted by the client. Corresponds to HAProxy's `cli_abrt` metric")
  149. m.data.SetUnit("{cancellations}")
  150. m.data.SetEmptySum()
  151. m.data.Sum().SetIsMonotonic(true)
  152. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  153. }
  154. func (m *metricHaproxyClientsCanceled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  155. if !m.config.Enabled {
  156. return
  157. }
  158. dp := m.data.Sum().DataPoints().AppendEmpty()
  159. dp.SetStartTimestamp(start)
  160. dp.SetTimestamp(ts)
  161. dp.SetIntValue(val)
  162. }
  163. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  164. func (m *metricHaproxyClientsCanceled) updateCapacity() {
  165. if m.data.Sum().DataPoints().Len() > m.capacity {
  166. m.capacity = m.data.Sum().DataPoints().Len()
  167. }
  168. }
  169. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  170. func (m *metricHaproxyClientsCanceled) emit(metrics pmetric.MetricSlice) {
  171. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  172. m.updateCapacity()
  173. m.data.MoveTo(metrics.AppendEmpty())
  174. m.init()
  175. }
  176. }
  177. func newMetricHaproxyClientsCanceled(cfg MetricConfig) metricHaproxyClientsCanceled {
  178. m := metricHaproxyClientsCanceled{config: cfg}
  179. if cfg.Enabled {
  180. m.data = pmetric.NewMetric()
  181. m.init()
  182. }
  183. return m
  184. }
  185. type metricHaproxyCompressionBypass struct {
  186. data pmetric.Metric // data buffer for generated metric.
  187. config MetricConfig // metric config provided by user.
  188. capacity int // max observed number of data points added to the metric.
  189. }
  190. // init fills haproxy.compression.bypass metric with initial data.
  191. func (m *metricHaproxyCompressionBypass) init() {
  192. m.data.SetName("haproxy.compression.bypass")
  193. m.data.SetDescription("Number of bytes that bypassed the HTTP compressor (CPU/BW limit). Corresponds to HAProxy's `comp_byp` metric.")
  194. m.data.SetUnit("by")
  195. m.data.SetEmptySum()
  196. m.data.Sum().SetIsMonotonic(true)
  197. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  198. }
  199. func (m *metricHaproxyCompressionBypass) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  200. if !m.config.Enabled {
  201. return
  202. }
  203. dp := m.data.Sum().DataPoints().AppendEmpty()
  204. dp.SetStartTimestamp(start)
  205. dp.SetTimestamp(ts)
  206. dp.SetIntValue(val)
  207. }
  208. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  209. func (m *metricHaproxyCompressionBypass) updateCapacity() {
  210. if m.data.Sum().DataPoints().Len() > m.capacity {
  211. m.capacity = m.data.Sum().DataPoints().Len()
  212. }
  213. }
  214. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  215. func (m *metricHaproxyCompressionBypass) emit(metrics pmetric.MetricSlice) {
  216. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  217. m.updateCapacity()
  218. m.data.MoveTo(metrics.AppendEmpty())
  219. m.init()
  220. }
  221. }
  222. func newMetricHaproxyCompressionBypass(cfg MetricConfig) metricHaproxyCompressionBypass {
  223. m := metricHaproxyCompressionBypass{config: cfg}
  224. if cfg.Enabled {
  225. m.data = pmetric.NewMetric()
  226. m.init()
  227. }
  228. return m
  229. }
  230. type metricHaproxyCompressionCount struct {
  231. data pmetric.Metric // data buffer for generated metric.
  232. config MetricConfig // metric config provided by user.
  233. capacity int // max observed number of data points added to the metric.
  234. }
  235. // init fills haproxy.compression.count metric with initial data.
  236. func (m *metricHaproxyCompressionCount) init() {
  237. m.data.SetName("haproxy.compression.count")
  238. m.data.SetDescription("Number of HTTP responses that were compressed. Corresponds to HAProxy's `comp_rsp` metric.")
  239. m.data.SetUnit("{responses}")
  240. m.data.SetEmptySum()
  241. m.data.Sum().SetIsMonotonic(true)
  242. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  243. }
  244. func (m *metricHaproxyCompressionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  245. if !m.config.Enabled {
  246. return
  247. }
  248. dp := m.data.Sum().DataPoints().AppendEmpty()
  249. dp.SetStartTimestamp(start)
  250. dp.SetTimestamp(ts)
  251. dp.SetIntValue(val)
  252. }
  253. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  254. func (m *metricHaproxyCompressionCount) updateCapacity() {
  255. if m.data.Sum().DataPoints().Len() > m.capacity {
  256. m.capacity = m.data.Sum().DataPoints().Len()
  257. }
  258. }
  259. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  260. func (m *metricHaproxyCompressionCount) emit(metrics pmetric.MetricSlice) {
  261. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  262. m.updateCapacity()
  263. m.data.MoveTo(metrics.AppendEmpty())
  264. m.init()
  265. }
  266. }
  267. func newMetricHaproxyCompressionCount(cfg MetricConfig) metricHaproxyCompressionCount {
  268. m := metricHaproxyCompressionCount{config: cfg}
  269. if cfg.Enabled {
  270. m.data = pmetric.NewMetric()
  271. m.init()
  272. }
  273. return m
  274. }
  275. type metricHaproxyCompressionInput struct {
  276. data pmetric.Metric // data buffer for generated metric.
  277. config MetricConfig // metric config provided by user.
  278. capacity int // max observed number of data points added to the metric.
  279. }
  280. // init fills haproxy.compression.input metric with initial data.
  281. func (m *metricHaproxyCompressionInput) init() {
  282. m.data.SetName("haproxy.compression.input")
  283. m.data.SetDescription("Number of HTTP response bytes fed to the compressor. Corresponds to HAProxy's `comp_in` metric.")
  284. m.data.SetUnit("by")
  285. m.data.SetEmptySum()
  286. m.data.Sum().SetIsMonotonic(true)
  287. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  288. }
  289. func (m *metricHaproxyCompressionInput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  290. if !m.config.Enabled {
  291. return
  292. }
  293. dp := m.data.Sum().DataPoints().AppendEmpty()
  294. dp.SetStartTimestamp(start)
  295. dp.SetTimestamp(ts)
  296. dp.SetIntValue(val)
  297. }
  298. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  299. func (m *metricHaproxyCompressionInput) updateCapacity() {
  300. if m.data.Sum().DataPoints().Len() > m.capacity {
  301. m.capacity = m.data.Sum().DataPoints().Len()
  302. }
  303. }
  304. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  305. func (m *metricHaproxyCompressionInput) emit(metrics pmetric.MetricSlice) {
  306. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  307. m.updateCapacity()
  308. m.data.MoveTo(metrics.AppendEmpty())
  309. m.init()
  310. }
  311. }
  312. func newMetricHaproxyCompressionInput(cfg MetricConfig) metricHaproxyCompressionInput {
  313. m := metricHaproxyCompressionInput{config: cfg}
  314. if cfg.Enabled {
  315. m.data = pmetric.NewMetric()
  316. m.init()
  317. }
  318. return m
  319. }
  320. type metricHaproxyCompressionOutput struct {
  321. data pmetric.Metric // data buffer for generated metric.
  322. config MetricConfig // metric config provided by user.
  323. capacity int // max observed number of data points added to the metric.
  324. }
  325. // init fills haproxy.compression.output metric with initial data.
  326. func (m *metricHaproxyCompressionOutput) init() {
  327. m.data.SetName("haproxy.compression.output")
  328. m.data.SetDescription("Number of HTTP response bytes emitted by the compressor. Corresponds to HAProxy's `comp_out` metric.")
  329. m.data.SetUnit("by")
  330. m.data.SetEmptySum()
  331. m.data.Sum().SetIsMonotonic(true)
  332. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  333. }
  334. func (m *metricHaproxyCompressionOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  335. if !m.config.Enabled {
  336. return
  337. }
  338. dp := m.data.Sum().DataPoints().AppendEmpty()
  339. dp.SetStartTimestamp(start)
  340. dp.SetTimestamp(ts)
  341. dp.SetIntValue(val)
  342. }
  343. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  344. func (m *metricHaproxyCompressionOutput) updateCapacity() {
  345. if m.data.Sum().DataPoints().Len() > m.capacity {
  346. m.capacity = m.data.Sum().DataPoints().Len()
  347. }
  348. }
  349. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  350. func (m *metricHaproxyCompressionOutput) emit(metrics pmetric.MetricSlice) {
  351. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  352. m.updateCapacity()
  353. m.data.MoveTo(metrics.AppendEmpty())
  354. m.init()
  355. }
  356. }
  357. func newMetricHaproxyCompressionOutput(cfg MetricConfig) metricHaproxyCompressionOutput {
  358. m := metricHaproxyCompressionOutput{config: cfg}
  359. if cfg.Enabled {
  360. m.data = pmetric.NewMetric()
  361. m.init()
  362. }
  363. return m
  364. }
  365. type metricHaproxyConnectionsErrors struct {
  366. data pmetric.Metric // data buffer for generated metric.
  367. config MetricConfig // metric config provided by user.
  368. capacity int // max observed number of data points added to the metric.
  369. }
  370. // init fills haproxy.connections.errors metric with initial data.
  371. func (m *metricHaproxyConnectionsErrors) init() {
  372. m.data.SetName("haproxy.connections.errors")
  373. m.data.SetDescription("Number of requests that encountered an error trying to connect to a backend server. The backend stat is the sum of the stat. Corresponds to HAProxy's `econ` metric")
  374. m.data.SetUnit("{errors}")
  375. m.data.SetEmptySum()
  376. m.data.Sum().SetIsMonotonic(true)
  377. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  378. }
  379. func (m *metricHaproxyConnectionsErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  380. if !m.config.Enabled {
  381. return
  382. }
  383. dp := m.data.Sum().DataPoints().AppendEmpty()
  384. dp.SetStartTimestamp(start)
  385. dp.SetTimestamp(ts)
  386. dp.SetIntValue(val)
  387. }
  388. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  389. func (m *metricHaproxyConnectionsErrors) updateCapacity() {
  390. if m.data.Sum().DataPoints().Len() > m.capacity {
  391. m.capacity = m.data.Sum().DataPoints().Len()
  392. }
  393. }
  394. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  395. func (m *metricHaproxyConnectionsErrors) emit(metrics pmetric.MetricSlice) {
  396. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  397. m.updateCapacity()
  398. m.data.MoveTo(metrics.AppendEmpty())
  399. m.init()
  400. }
  401. }
  402. func newMetricHaproxyConnectionsErrors(cfg MetricConfig) metricHaproxyConnectionsErrors {
  403. m := metricHaproxyConnectionsErrors{config: cfg}
  404. if cfg.Enabled {
  405. m.data = pmetric.NewMetric()
  406. m.init()
  407. }
  408. return m
  409. }
  410. type metricHaproxyConnectionsRate struct {
  411. data pmetric.Metric // data buffer for generated metric.
  412. config MetricConfig // metric config provided by user.
  413. capacity int // max observed number of data points added to the metric.
  414. }
  415. // init fills haproxy.connections.rate metric with initial data.
  416. func (m *metricHaproxyConnectionsRate) init() {
  417. m.data.SetName("haproxy.connections.rate")
  418. m.data.SetDescription("Number of connections over the last elapsed second (frontend). Corresponds to HAProxy's `conn_rate` metric.")
  419. m.data.SetUnit("{connections}")
  420. m.data.SetEmptyGauge()
  421. }
  422. func (m *metricHaproxyConnectionsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  423. if !m.config.Enabled {
  424. return
  425. }
  426. dp := m.data.Gauge().DataPoints().AppendEmpty()
  427. dp.SetStartTimestamp(start)
  428. dp.SetTimestamp(ts)
  429. dp.SetIntValue(val)
  430. }
  431. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  432. func (m *metricHaproxyConnectionsRate) updateCapacity() {
  433. if m.data.Gauge().DataPoints().Len() > m.capacity {
  434. m.capacity = m.data.Gauge().DataPoints().Len()
  435. }
  436. }
  437. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  438. func (m *metricHaproxyConnectionsRate) emit(metrics pmetric.MetricSlice) {
  439. if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
  440. m.updateCapacity()
  441. m.data.MoveTo(metrics.AppendEmpty())
  442. m.init()
  443. }
  444. }
  445. func newMetricHaproxyConnectionsRate(cfg MetricConfig) metricHaproxyConnectionsRate {
  446. m := metricHaproxyConnectionsRate{config: cfg}
  447. if cfg.Enabled {
  448. m.data = pmetric.NewMetric()
  449. m.init()
  450. }
  451. return m
  452. }
  453. type metricHaproxyConnectionsRetries struct {
  454. data pmetric.Metric // data buffer for generated metric.
  455. config MetricConfig // metric config provided by user.
  456. capacity int // max observed number of data points added to the metric.
  457. }
  458. // init fills haproxy.connections.retries metric with initial data.
  459. func (m *metricHaproxyConnectionsRetries) init() {
  460. m.data.SetName("haproxy.connections.retries")
  461. m.data.SetDescription("Number of times a connection to a server was retried. Corresponds to HAProxy's `wretr` metric.")
  462. m.data.SetUnit("{retries}")
  463. m.data.SetEmptySum()
  464. m.data.Sum().SetIsMonotonic(true)
  465. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  466. }
  467. func (m *metricHaproxyConnectionsRetries) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  468. if !m.config.Enabled {
  469. return
  470. }
  471. dp := m.data.Sum().DataPoints().AppendEmpty()
  472. dp.SetStartTimestamp(start)
  473. dp.SetTimestamp(ts)
  474. dp.SetIntValue(val)
  475. }
  476. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  477. func (m *metricHaproxyConnectionsRetries) updateCapacity() {
  478. if m.data.Sum().DataPoints().Len() > m.capacity {
  479. m.capacity = m.data.Sum().DataPoints().Len()
  480. }
  481. }
  482. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  483. func (m *metricHaproxyConnectionsRetries) emit(metrics pmetric.MetricSlice) {
  484. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  485. m.updateCapacity()
  486. m.data.MoveTo(metrics.AppendEmpty())
  487. m.init()
  488. }
  489. }
  490. func newMetricHaproxyConnectionsRetries(cfg MetricConfig) metricHaproxyConnectionsRetries {
  491. m := metricHaproxyConnectionsRetries{config: cfg}
  492. if cfg.Enabled {
  493. m.data = pmetric.NewMetric()
  494. m.init()
  495. }
  496. return m
  497. }
  498. type metricHaproxyConnectionsTotal struct {
  499. data pmetric.Metric // data buffer for generated metric.
  500. config MetricConfig // metric config provided by user.
  501. capacity int // max observed number of data points added to the metric.
  502. }
  503. // init fills haproxy.connections.total metric with initial data.
  504. func (m *metricHaproxyConnectionsTotal) init() {
  505. m.data.SetName("haproxy.connections.total")
  506. m.data.SetDescription("Cumulative number of connections (frontend). Corresponds to HAProxy's `conn_tot` metric.")
  507. m.data.SetUnit("{connections}")
  508. m.data.SetEmptySum()
  509. m.data.Sum().SetIsMonotonic(true)
  510. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  511. }
  512. func (m *metricHaproxyConnectionsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  513. if !m.config.Enabled {
  514. return
  515. }
  516. dp := m.data.Sum().DataPoints().AppendEmpty()
  517. dp.SetStartTimestamp(start)
  518. dp.SetTimestamp(ts)
  519. dp.SetIntValue(val)
  520. }
  521. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  522. func (m *metricHaproxyConnectionsTotal) updateCapacity() {
  523. if m.data.Sum().DataPoints().Len() > m.capacity {
  524. m.capacity = m.data.Sum().DataPoints().Len()
  525. }
  526. }
  527. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  528. func (m *metricHaproxyConnectionsTotal) emit(metrics pmetric.MetricSlice) {
  529. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  530. m.updateCapacity()
  531. m.data.MoveTo(metrics.AppendEmpty())
  532. m.init()
  533. }
  534. }
  535. func newMetricHaproxyConnectionsTotal(cfg MetricConfig) metricHaproxyConnectionsTotal {
  536. m := metricHaproxyConnectionsTotal{config: cfg}
  537. if cfg.Enabled {
  538. m.data = pmetric.NewMetric()
  539. m.init()
  540. }
  541. return m
  542. }
  543. type metricHaproxyDowntime struct {
  544. data pmetric.Metric // data buffer for generated metric.
  545. config MetricConfig // metric config provided by user.
  546. capacity int // max observed number of data points added to the metric.
  547. }
  548. // init fills haproxy.downtime metric with initial data.
  549. func (m *metricHaproxyDowntime) init() {
  550. m.data.SetName("haproxy.downtime")
  551. m.data.SetDescription("Total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime. Corresponds to HAProxy's `downtime` metric")
  552. m.data.SetUnit("s")
  553. m.data.SetEmptySum()
  554. m.data.Sum().SetIsMonotonic(true)
  555. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  556. }
  557. func (m *metricHaproxyDowntime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  558. if !m.config.Enabled {
  559. return
  560. }
  561. dp := m.data.Sum().DataPoints().AppendEmpty()
  562. dp.SetStartTimestamp(start)
  563. dp.SetTimestamp(ts)
  564. dp.SetIntValue(val)
  565. }
  566. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  567. func (m *metricHaproxyDowntime) updateCapacity() {
  568. if m.data.Sum().DataPoints().Len() > m.capacity {
  569. m.capacity = m.data.Sum().DataPoints().Len()
  570. }
  571. }
  572. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  573. func (m *metricHaproxyDowntime) emit(metrics pmetric.MetricSlice) {
  574. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  575. m.updateCapacity()
  576. m.data.MoveTo(metrics.AppendEmpty())
  577. m.init()
  578. }
  579. }
  580. func newMetricHaproxyDowntime(cfg MetricConfig) metricHaproxyDowntime {
  581. m := metricHaproxyDowntime{config: cfg}
  582. if cfg.Enabled {
  583. m.data = pmetric.NewMetric()
  584. m.init()
  585. }
  586. return m
  587. }
  588. type metricHaproxyFailedChecks struct {
  589. data pmetric.Metric // data buffer for generated metric.
  590. config MetricConfig // metric config provided by user.
  591. capacity int // max observed number of data points added to the metric.
  592. }
  593. // init fills haproxy.failed_checks metric with initial data.
  594. func (m *metricHaproxyFailedChecks) init() {
  595. m.data.SetName("haproxy.failed_checks")
  596. m.data.SetDescription("Number of failed checks. (Only counts checks failed when the server is up). Corresponds to HAProxy's `chkfail` metric.")
  597. m.data.SetUnit("{checks}")
  598. m.data.SetEmptySum()
  599. m.data.Sum().SetIsMonotonic(true)
  600. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  601. }
  602. func (m *metricHaproxyFailedChecks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  603. if !m.config.Enabled {
  604. return
  605. }
  606. dp := m.data.Sum().DataPoints().AppendEmpty()
  607. dp.SetStartTimestamp(start)
  608. dp.SetTimestamp(ts)
  609. dp.SetIntValue(val)
  610. }
  611. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  612. func (m *metricHaproxyFailedChecks) updateCapacity() {
  613. if m.data.Sum().DataPoints().Len() > m.capacity {
  614. m.capacity = m.data.Sum().DataPoints().Len()
  615. }
  616. }
  617. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  618. func (m *metricHaproxyFailedChecks) emit(metrics pmetric.MetricSlice) {
  619. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  620. m.updateCapacity()
  621. m.data.MoveTo(metrics.AppendEmpty())
  622. m.init()
  623. }
  624. }
  625. func newMetricHaproxyFailedChecks(cfg MetricConfig) metricHaproxyFailedChecks {
  626. m := metricHaproxyFailedChecks{config: cfg}
  627. if cfg.Enabled {
  628. m.data = pmetric.NewMetric()
  629. m.init()
  630. }
  631. return m
  632. }
  633. type metricHaproxyRequestsDenied struct {
  634. data pmetric.Metric // data buffer for generated metric.
  635. config MetricConfig // metric config provided by user.
  636. capacity int // max observed number of data points added to the metric.
  637. }
  638. // init fills haproxy.requests.denied metric with initial data.
  639. func (m *metricHaproxyRequestsDenied) init() {
  640. m.data.SetName("haproxy.requests.denied")
  641. m.data.SetDescription("Requests denied because of security concerns. Corresponds to HAProxy's `dreq` metric")
  642. m.data.SetUnit("{requests}")
  643. m.data.SetEmptySum()
  644. m.data.Sum().SetIsMonotonic(true)
  645. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  646. }
  647. func (m *metricHaproxyRequestsDenied) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  648. if !m.config.Enabled {
  649. return
  650. }
  651. dp := m.data.Sum().DataPoints().AppendEmpty()
  652. dp.SetStartTimestamp(start)
  653. dp.SetTimestamp(ts)
  654. dp.SetIntValue(val)
  655. }
  656. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  657. func (m *metricHaproxyRequestsDenied) updateCapacity() {
  658. if m.data.Sum().DataPoints().Len() > m.capacity {
  659. m.capacity = m.data.Sum().DataPoints().Len()
  660. }
  661. }
  662. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  663. func (m *metricHaproxyRequestsDenied) emit(metrics pmetric.MetricSlice) {
  664. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  665. m.updateCapacity()
  666. m.data.MoveTo(metrics.AppendEmpty())
  667. m.init()
  668. }
  669. }
  670. func newMetricHaproxyRequestsDenied(cfg MetricConfig) metricHaproxyRequestsDenied {
  671. m := metricHaproxyRequestsDenied{config: cfg}
  672. if cfg.Enabled {
  673. m.data = pmetric.NewMetric()
  674. m.init()
  675. }
  676. return m
  677. }
  678. type metricHaproxyRequestsErrors struct {
  679. data pmetric.Metric // data buffer for generated metric.
  680. config MetricConfig // metric config provided by user.
  681. capacity int // max observed number of data points added to the metric.
  682. }
  683. // init fills haproxy.requests.errors metric with initial data.
  684. func (m *metricHaproxyRequestsErrors) init() {
  685. m.data.SetName("haproxy.requests.errors")
  686. m.data.SetDescription("Cumulative number of request errors. Corresponds to HAProxy's `ereq` metric.")
  687. m.data.SetUnit("{errors}")
  688. m.data.SetEmptySum()
  689. m.data.Sum().SetIsMonotonic(true)
  690. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  691. }
  692. func (m *metricHaproxyRequestsErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  693. if !m.config.Enabled {
  694. return
  695. }
  696. dp := m.data.Sum().DataPoints().AppendEmpty()
  697. dp.SetStartTimestamp(start)
  698. dp.SetTimestamp(ts)
  699. dp.SetIntValue(val)
  700. }
  701. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  702. func (m *metricHaproxyRequestsErrors) updateCapacity() {
  703. if m.data.Sum().DataPoints().Len() > m.capacity {
  704. m.capacity = m.data.Sum().DataPoints().Len()
  705. }
  706. }
  707. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  708. func (m *metricHaproxyRequestsErrors) emit(metrics pmetric.MetricSlice) {
  709. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  710. m.updateCapacity()
  711. m.data.MoveTo(metrics.AppendEmpty())
  712. m.init()
  713. }
  714. }
  715. func newMetricHaproxyRequestsErrors(cfg MetricConfig) metricHaproxyRequestsErrors {
  716. m := metricHaproxyRequestsErrors{config: cfg}
  717. if cfg.Enabled {
  718. m.data = pmetric.NewMetric()
  719. m.init()
  720. }
  721. return m
  722. }
  723. type metricHaproxyRequestsQueued struct {
  724. data pmetric.Metric // data buffer for generated metric.
  725. config MetricConfig // metric config provided by user.
  726. capacity int // max observed number of data points added to the metric.
  727. }
  728. // init fills haproxy.requests.queued metric with initial data.
  729. func (m *metricHaproxyRequestsQueued) init() {
  730. m.data.SetName("haproxy.requests.queued")
  731. m.data.SetDescription("Current queued requests. For the backend this reports the number queued without a server assigned. Corresponds to HAProxy's `qcur` metric.")
  732. m.data.SetUnit("{requests}")
  733. m.data.SetEmptySum()
  734. m.data.Sum().SetIsMonotonic(true)
  735. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  736. }
  737. func (m *metricHaproxyRequestsQueued) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  738. if !m.config.Enabled {
  739. return
  740. }
  741. dp := m.data.Sum().DataPoints().AppendEmpty()
  742. dp.SetStartTimestamp(start)
  743. dp.SetTimestamp(ts)
  744. dp.SetIntValue(val)
  745. }
  746. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  747. func (m *metricHaproxyRequestsQueued) updateCapacity() {
  748. if m.data.Sum().DataPoints().Len() > m.capacity {
  749. m.capacity = m.data.Sum().DataPoints().Len()
  750. }
  751. }
  752. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  753. func (m *metricHaproxyRequestsQueued) emit(metrics pmetric.MetricSlice) {
  754. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  755. m.updateCapacity()
  756. m.data.MoveTo(metrics.AppendEmpty())
  757. m.init()
  758. }
  759. }
  760. func newMetricHaproxyRequestsQueued(cfg MetricConfig) metricHaproxyRequestsQueued {
  761. m := metricHaproxyRequestsQueued{config: cfg}
  762. if cfg.Enabled {
  763. m.data = pmetric.NewMetric()
  764. m.init()
  765. }
  766. return m
  767. }
  768. type metricHaproxyRequestsRate struct {
  769. data pmetric.Metric // data buffer for generated metric.
  770. config MetricConfig // metric config provided by user.
  771. capacity int // max observed number of data points added to the metric.
  772. }
  773. // init fills haproxy.requests.rate metric with initial data.
  774. func (m *metricHaproxyRequestsRate) init() {
  775. m.data.SetName("haproxy.requests.rate")
  776. m.data.SetDescription("HTTP requests per second over last elapsed second. Corresponds to HAProxy's `req_rate` metric.")
  777. m.data.SetUnit("{requests}")
  778. m.data.SetEmptyGauge()
  779. }
  780. func (m *metricHaproxyRequestsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
  781. if !m.config.Enabled {
  782. return
  783. }
  784. dp := m.data.Gauge().DataPoints().AppendEmpty()
  785. dp.SetStartTimestamp(start)
  786. dp.SetTimestamp(ts)
  787. dp.SetDoubleValue(val)
  788. }
  789. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  790. func (m *metricHaproxyRequestsRate) updateCapacity() {
  791. if m.data.Gauge().DataPoints().Len() > m.capacity {
  792. m.capacity = m.data.Gauge().DataPoints().Len()
  793. }
  794. }
  795. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  796. func (m *metricHaproxyRequestsRate) emit(metrics pmetric.MetricSlice) {
  797. if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
  798. m.updateCapacity()
  799. m.data.MoveTo(metrics.AppendEmpty())
  800. m.init()
  801. }
  802. }
  803. func newMetricHaproxyRequestsRate(cfg MetricConfig) metricHaproxyRequestsRate {
  804. m := metricHaproxyRequestsRate{config: cfg}
  805. if cfg.Enabled {
  806. m.data = pmetric.NewMetric()
  807. m.init()
  808. }
  809. return m
  810. }
  811. type metricHaproxyRequestsRedispatched struct {
  812. data pmetric.Metric // data buffer for generated metric.
  813. config MetricConfig // metric config provided by user.
  814. capacity int // max observed number of data points added to the metric.
  815. }
  816. // init fills haproxy.requests.redispatched metric with initial data.
  817. func (m *metricHaproxyRequestsRedispatched) init() {
  818. m.data.SetName("haproxy.requests.redispatched")
  819. m.data.SetDescription("Number of times a request was redispatched to another server. Corresponds to HAProxy's `wredis` metric.")
  820. m.data.SetUnit("{requests}")
  821. m.data.SetEmptySum()
  822. m.data.Sum().SetIsMonotonic(true)
  823. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  824. }
  825. func (m *metricHaproxyRequestsRedispatched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  826. if !m.config.Enabled {
  827. return
  828. }
  829. dp := m.data.Sum().DataPoints().AppendEmpty()
  830. dp.SetStartTimestamp(start)
  831. dp.SetTimestamp(ts)
  832. dp.SetIntValue(val)
  833. }
  834. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  835. func (m *metricHaproxyRequestsRedispatched) updateCapacity() {
  836. if m.data.Sum().DataPoints().Len() > m.capacity {
  837. m.capacity = m.data.Sum().DataPoints().Len()
  838. }
  839. }
  840. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  841. func (m *metricHaproxyRequestsRedispatched) emit(metrics pmetric.MetricSlice) {
  842. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  843. m.updateCapacity()
  844. m.data.MoveTo(metrics.AppendEmpty())
  845. m.init()
  846. }
  847. }
  848. func newMetricHaproxyRequestsRedispatched(cfg MetricConfig) metricHaproxyRequestsRedispatched {
  849. m := metricHaproxyRequestsRedispatched{config: cfg}
  850. if cfg.Enabled {
  851. m.data = pmetric.NewMetric()
  852. m.init()
  853. }
  854. return m
  855. }
  856. type metricHaproxyRequestsTotal struct {
  857. data pmetric.Metric // data buffer for generated metric.
  858. config MetricConfig // metric config provided by user.
  859. capacity int // max observed number of data points added to the metric.
  860. }
  861. // init fills haproxy.requests.total metric with initial data.
  862. func (m *metricHaproxyRequestsTotal) init() {
  863. m.data.SetName("haproxy.requests.total")
  864. m.data.SetDescription("Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics.")
  865. m.data.SetUnit("{requests}")
  866. m.data.SetEmptySum()
  867. m.data.Sum().SetIsMonotonic(true)
  868. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  869. m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
  870. }
  871. func (m *metricHaproxyRequestsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, statusCodeAttributeValue string) {
  872. if !m.config.Enabled {
  873. return
  874. }
  875. dp := m.data.Sum().DataPoints().AppendEmpty()
  876. dp.SetStartTimestamp(start)
  877. dp.SetTimestamp(ts)
  878. dp.SetIntValue(val)
  879. dp.Attributes().PutStr("status_code", statusCodeAttributeValue)
  880. }
  881. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  882. func (m *metricHaproxyRequestsTotal) updateCapacity() {
  883. if m.data.Sum().DataPoints().Len() > m.capacity {
  884. m.capacity = m.data.Sum().DataPoints().Len()
  885. }
  886. }
  887. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  888. func (m *metricHaproxyRequestsTotal) emit(metrics pmetric.MetricSlice) {
  889. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  890. m.updateCapacity()
  891. m.data.MoveTo(metrics.AppendEmpty())
  892. m.init()
  893. }
  894. }
  895. func newMetricHaproxyRequestsTotal(cfg MetricConfig) metricHaproxyRequestsTotal {
  896. m := metricHaproxyRequestsTotal{config: cfg}
  897. if cfg.Enabled {
  898. m.data = pmetric.NewMetric()
  899. m.init()
  900. }
  901. return m
  902. }
  903. type metricHaproxyResponsesDenied struct {
  904. data pmetric.Metric // data buffer for generated metric.
  905. config MetricConfig // metric config provided by user.
  906. capacity int // max observed number of data points added to the metric.
  907. }
  908. // init fills haproxy.responses.denied metric with initial data.
  909. func (m *metricHaproxyResponsesDenied) init() {
  910. m.data.SetName("haproxy.responses.denied")
  911. m.data.SetDescription("Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric")
  912. m.data.SetUnit("{responses}")
  913. m.data.SetEmptySum()
  914. m.data.Sum().SetIsMonotonic(true)
  915. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  916. }
  917. func (m *metricHaproxyResponsesDenied) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  918. if !m.config.Enabled {
  919. return
  920. }
  921. dp := m.data.Sum().DataPoints().AppendEmpty()
  922. dp.SetStartTimestamp(start)
  923. dp.SetTimestamp(ts)
  924. dp.SetIntValue(val)
  925. }
  926. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  927. func (m *metricHaproxyResponsesDenied) updateCapacity() {
  928. if m.data.Sum().DataPoints().Len() > m.capacity {
  929. m.capacity = m.data.Sum().DataPoints().Len()
  930. }
  931. }
  932. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  933. func (m *metricHaproxyResponsesDenied) emit(metrics pmetric.MetricSlice) {
  934. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  935. m.updateCapacity()
  936. m.data.MoveTo(metrics.AppendEmpty())
  937. m.init()
  938. }
  939. }
  940. func newMetricHaproxyResponsesDenied(cfg MetricConfig) metricHaproxyResponsesDenied {
  941. m := metricHaproxyResponsesDenied{config: cfg}
  942. if cfg.Enabled {
  943. m.data = pmetric.NewMetric()
  944. m.init()
  945. }
  946. return m
  947. }
  948. type metricHaproxyResponsesErrors struct {
  949. data pmetric.Metric // data buffer for generated metric.
  950. config MetricConfig // metric config provided by user.
  951. capacity int // max observed number of data points added to the metric.
  952. }
  953. // init fills haproxy.responses.errors metric with initial data.
  954. func (m *metricHaproxyResponsesErrors) init() {
  955. m.data.SetName("haproxy.responses.errors")
  956. m.data.SetDescription("Cumulative number of response errors. Corresponds to HAProxy's `eresp` metric, `srv_abrt` will be counted here also.")
  957. m.data.SetUnit("{errors}")
  958. m.data.SetEmptySum()
  959. m.data.Sum().SetIsMonotonic(true)
  960. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  961. }
  962. func (m *metricHaproxyResponsesErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  963. if !m.config.Enabled {
  964. return
  965. }
  966. dp := m.data.Sum().DataPoints().AppendEmpty()
  967. dp.SetStartTimestamp(start)
  968. dp.SetTimestamp(ts)
  969. dp.SetIntValue(val)
  970. }
  971. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  972. func (m *metricHaproxyResponsesErrors) updateCapacity() {
  973. if m.data.Sum().DataPoints().Len() > m.capacity {
  974. m.capacity = m.data.Sum().DataPoints().Len()
  975. }
  976. }
  977. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  978. func (m *metricHaproxyResponsesErrors) emit(metrics pmetric.MetricSlice) {
  979. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  980. m.updateCapacity()
  981. m.data.MoveTo(metrics.AppendEmpty())
  982. m.init()
  983. }
  984. }
  985. func newMetricHaproxyResponsesErrors(cfg MetricConfig) metricHaproxyResponsesErrors {
  986. m := metricHaproxyResponsesErrors{config: cfg}
  987. if cfg.Enabled {
  988. m.data = pmetric.NewMetric()
  989. m.init()
  990. }
  991. return m
  992. }
  993. type metricHaproxyServerSelectedTotal struct {
  994. data pmetric.Metric // data buffer for generated metric.
  995. config MetricConfig // metric config provided by user.
  996. capacity int // max observed number of data points added to the metric.
  997. }
  998. // init fills haproxy.server_selected.total metric with initial data.
  999. func (m *metricHaproxyServerSelectedTotal) init() {
  1000. m.data.SetName("haproxy.server_selected.total")
  1001. m.data.SetDescription("Number of times a server was selected, either for new sessions or when re-dispatching. Corresponds to HAProxy's `lbtot` metric.")
  1002. m.data.SetUnit("{selections}")
  1003. m.data.SetEmptySum()
  1004. m.data.Sum().SetIsMonotonic(true)
  1005. m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
  1006. }
  1007. func (m *metricHaproxyServerSelectedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  1008. if !m.config.Enabled {
  1009. return
  1010. }
  1011. dp := m.data.Sum().DataPoints().AppendEmpty()
  1012. dp.SetStartTimestamp(start)
  1013. dp.SetTimestamp(ts)
  1014. dp.SetIntValue(val)
  1015. }
  1016. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  1017. func (m *metricHaproxyServerSelectedTotal) updateCapacity() {
  1018. if m.data.Sum().DataPoints().Len() > m.capacity {
  1019. m.capacity = m.data.Sum().DataPoints().Len()
  1020. }
  1021. }
  1022. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  1023. func (m *metricHaproxyServerSelectedTotal) emit(metrics pmetric.MetricSlice) {
  1024. if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
  1025. m.updateCapacity()
  1026. m.data.MoveTo(metrics.AppendEmpty())
  1027. m.init()
  1028. }
  1029. }
  1030. func newMetricHaproxyServerSelectedTotal(cfg MetricConfig) metricHaproxyServerSelectedTotal {
  1031. m := metricHaproxyServerSelectedTotal{config: cfg}
  1032. if cfg.Enabled {
  1033. m.data = pmetric.NewMetric()
  1034. m.init()
  1035. }
  1036. return m
  1037. }
  1038. type metricHaproxySessionsAverage struct {
  1039. data pmetric.Metric // data buffer for generated metric.
  1040. config MetricConfig // metric config provided by user.
  1041. capacity int // max observed number of data points added to the metric.
  1042. }
  1043. // init fills haproxy.sessions.average metric with initial data.
  1044. func (m *metricHaproxySessionsAverage) init() {
  1045. m.data.SetName("haproxy.sessions.average")
  1046. m.data.SetDescription("Average total session time in ms over the last 1024 requests. Corresponds to HAProxy's `ttime` metric.")
  1047. m.data.SetUnit("ms")
  1048. m.data.SetEmptyGauge()
  1049. }
  1050. func (m *metricHaproxySessionsAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
  1051. if !m.config.Enabled {
  1052. return
  1053. }
  1054. dp := m.data.Gauge().DataPoints().AppendEmpty()
  1055. dp.SetStartTimestamp(start)
  1056. dp.SetTimestamp(ts)
  1057. dp.SetDoubleValue(val)
  1058. }
  1059. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  1060. func (m *metricHaproxySessionsAverage) updateCapacity() {
  1061. if m.data.Gauge().DataPoints().Len() > m.capacity {
  1062. m.capacity = m.data.Gauge().DataPoints().Len()
  1063. }
  1064. }
  1065. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  1066. func (m *metricHaproxySessionsAverage) emit(metrics pmetric.MetricSlice) {
  1067. if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
  1068. m.updateCapacity()
  1069. m.data.MoveTo(metrics.AppendEmpty())
  1070. m.init()
  1071. }
  1072. }
  1073. func newMetricHaproxySessionsAverage(cfg MetricConfig) metricHaproxySessionsAverage {
  1074. m := metricHaproxySessionsAverage{config: cfg}
  1075. if cfg.Enabled {
  1076. m.data = pmetric.NewMetric()
  1077. m.init()
  1078. }
  1079. return m
  1080. }
  1081. type metricHaproxySessionsCount struct {
  1082. data pmetric.Metric // data buffer for generated metric.
  1083. config MetricConfig // metric config provided by user.
  1084. capacity int // max observed number of data points added to the metric.
  1085. }
  1086. // init fills haproxy.sessions.count metric with initial data.
  1087. func (m *metricHaproxySessionsCount) init() {
  1088. m.data.SetName("haproxy.sessions.count")
  1089. m.data.SetDescription("Current sessions. Corresponds to HAProxy's `scur` metric.")
  1090. m.data.SetUnit("{sessions}")
  1091. m.data.SetEmptyGauge()
  1092. }
  1093. func (m *metricHaproxySessionsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
  1094. if !m.config.Enabled {
  1095. return
  1096. }
  1097. dp := m.data.Gauge().DataPoints().AppendEmpty()
  1098. dp.SetStartTimestamp(start)
  1099. dp.SetTimestamp(ts)
  1100. dp.SetIntValue(val)
  1101. }
  1102. // updateCapacity saves max length of data point slices that will be used for the slice capacity.
  1103. func (m *metricHaproxySessionsCount) updateCapacity() {
  1104. if m.data.Gauge().DataPoints().Len() > m.capacity {
  1105. m.capacity = m.data.Gauge().DataPoints().Len()
  1106. }
  1107. }
  1108. // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
  1109. func (m *metricHaproxySessionsCount) emit(metrics pmetric.MetricSlice) {
  1110. if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
  1111. m.updateCapacity()
  1112. m.data.MoveTo(metrics.AppendEmpty())
  1113. m.init()
  1114. }
  1115. }
  1116. func newMetricHaproxySessionsCount(cfg MetricConfig) metricHaproxySessionsCount {
  1117. m := metricHaproxySessionsCount{config: cfg}
  1118. if cfg.Enabled {
  1119. m.data = pmetric.NewMetric()
  1120. m.init()
  1121. }
  1122. return m
  1123. }
type metricHaproxySessionsRate struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills haproxy.sessions.rate metric with initial data.
func (m *metricHaproxySessionsRate) init() {
	m.data.SetName("haproxy.sessions.rate")
	m.data.SetDescription("Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric.")
	m.data.SetUnit("{sessions}")
	m.data.SetEmptyGauge()
}

func (m *metricHaproxySessionsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricHaproxySessionsRate) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricHaproxySessionsRate) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricHaproxySessionsRate(cfg MetricConfig) metricHaproxySessionsRate {
	m := metricHaproxySessionsRate{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
type metricHaproxySessionsTotal struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills haproxy.sessions.total metric with initial data.
func (m *metricHaproxySessionsTotal) init() {
	m.data.SetName("haproxy.sessions.total")
	m.data.SetDescription("Cumulative number of sessions. Corresponds to HAProxy's `stot` metric.")
	m.data.SetUnit("{sessions}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricHaproxySessionsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricHaproxySessionsTotal) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricHaproxySessionsTotal) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricHaproxySessionsTotal(cfg MetricConfig) metricHaproxySessionsTotal {
	m := metricHaproxySessionsTotal{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
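
// Note on the generated metric helpers above: each metricHaproxy* type follows the same lifecycle.
// init sets the metric name, description, unit, and data type; recordDataPoint appends a data point
// only when the metric is enabled in the user config; emit moves the buffered data into the provided
// pmetric.MetricSlice and calls init again so the next collection cycle starts from an empty metric,
// while updateCapacity tracks the largest observed number of data points for use as slice capacity.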
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in metadata and user config.
type MetricsBuilder struct {
	config          MetricsBuilderConfig // config of the metrics builder.
	startTime       pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity int                  // maximum observed number of metrics per resource.
	metricsBuffer   pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo       component.BuildInfo  // contains version information.
	metricHaproxyBytesInput metricHaproxyBytesInput
	metricHaproxyBytesOutput metricHaproxyBytesOutput
	metricHaproxyClientsCanceled metricHaproxyClientsCanceled
	metricHaproxyCompressionBypass metricHaproxyCompressionBypass
	metricHaproxyCompressionCount metricHaproxyCompressionCount
	metricHaproxyCompressionInput metricHaproxyCompressionInput
	metricHaproxyCompressionOutput metricHaproxyCompressionOutput
	metricHaproxyConnectionsErrors metricHaproxyConnectionsErrors
	metricHaproxyConnectionsRate metricHaproxyConnectionsRate
	metricHaproxyConnectionsRetries metricHaproxyConnectionsRetries
	metricHaproxyConnectionsTotal metricHaproxyConnectionsTotal
	metricHaproxyDowntime metricHaproxyDowntime
	metricHaproxyFailedChecks metricHaproxyFailedChecks
	metricHaproxyRequestsDenied metricHaproxyRequestsDenied
	metricHaproxyRequestsErrors metricHaproxyRequestsErrors
	metricHaproxyRequestsQueued metricHaproxyRequestsQueued
	metricHaproxyRequestsRate metricHaproxyRequestsRate
	metricHaproxyRequestsRedispatched metricHaproxyRequestsRedispatched
	metricHaproxyRequestsTotal metricHaproxyRequestsTotal
	metricHaproxyResponsesDenied metricHaproxyResponsesDenied
	metricHaproxyResponsesErrors metricHaproxyResponsesErrors
	metricHaproxyServerSelectedTotal metricHaproxyServerSelectedTotal
	metricHaproxySessionsAverage metricHaproxySessionsAverage
	metricHaproxySessionsCount metricHaproxySessionsCount
	metricHaproxySessionsRate metricHaproxySessionsRate
	metricHaproxySessionsTotal metricHaproxySessionsTotal
}
// metricBuilderOption applies changes to default metrics builder.
type metricBuilderOption func(*MetricsBuilder)

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
	return func(mb *MetricsBuilder) {
		mb.startTime = startTime
	}
}
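
// Illustrative use of WithStartTime (a sketch; processStart is a hypothetical timestamp chosen by
// the caller, and mbc and settings stand in for the caller's config and receiver settings):
//
//	mb := NewMetricsBuilder(mbc, settings, WithStartTime(pcommon.NewTimestampFromTime(processStart)))
//
// Without this option, NewMetricsBuilder defaults startTime to time.Now() at construction time.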
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config: mbc,
		startTime: pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer: pmetric.NewMetrics(),
		buildInfo: settings.BuildInfo,
		metricHaproxyBytesInput: newMetricHaproxyBytesInput(mbc.Metrics.HaproxyBytesInput),
		metricHaproxyBytesOutput: newMetricHaproxyBytesOutput(mbc.Metrics.HaproxyBytesOutput),
		metricHaproxyClientsCanceled: newMetricHaproxyClientsCanceled(mbc.Metrics.HaproxyClientsCanceled),
		metricHaproxyCompressionBypass: newMetricHaproxyCompressionBypass(mbc.Metrics.HaproxyCompressionBypass),
		metricHaproxyCompressionCount: newMetricHaproxyCompressionCount(mbc.Metrics.HaproxyCompressionCount),
		metricHaproxyCompressionInput: newMetricHaproxyCompressionInput(mbc.Metrics.HaproxyCompressionInput),
		metricHaproxyCompressionOutput: newMetricHaproxyCompressionOutput(mbc.Metrics.HaproxyCompressionOutput),
		metricHaproxyConnectionsErrors: newMetricHaproxyConnectionsErrors(mbc.Metrics.HaproxyConnectionsErrors),
		metricHaproxyConnectionsRate: newMetricHaproxyConnectionsRate(mbc.Metrics.HaproxyConnectionsRate),
		metricHaproxyConnectionsRetries: newMetricHaproxyConnectionsRetries(mbc.Metrics.HaproxyConnectionsRetries),
		metricHaproxyConnectionsTotal: newMetricHaproxyConnectionsTotal(mbc.Metrics.HaproxyConnectionsTotal),
		metricHaproxyDowntime: newMetricHaproxyDowntime(mbc.Metrics.HaproxyDowntime),
		metricHaproxyFailedChecks: newMetricHaproxyFailedChecks(mbc.Metrics.HaproxyFailedChecks),
		metricHaproxyRequestsDenied: newMetricHaproxyRequestsDenied(mbc.Metrics.HaproxyRequestsDenied),
		metricHaproxyRequestsErrors: newMetricHaproxyRequestsErrors(mbc.Metrics.HaproxyRequestsErrors),
		metricHaproxyRequestsQueued: newMetricHaproxyRequestsQueued(mbc.Metrics.HaproxyRequestsQueued),
		metricHaproxyRequestsRate: newMetricHaproxyRequestsRate(mbc.Metrics.HaproxyRequestsRate),
		metricHaproxyRequestsRedispatched: newMetricHaproxyRequestsRedispatched(mbc.Metrics.HaproxyRequestsRedispatched),
		metricHaproxyRequestsTotal: newMetricHaproxyRequestsTotal(mbc.Metrics.HaproxyRequestsTotal),
		metricHaproxyResponsesDenied: newMetricHaproxyResponsesDenied(mbc.Metrics.HaproxyResponsesDenied),
		metricHaproxyResponsesErrors: newMetricHaproxyResponsesErrors(mbc.Metrics.HaproxyResponsesErrors),
		metricHaproxyServerSelectedTotal: newMetricHaproxyServerSelectedTotal(mbc.Metrics.HaproxyServerSelectedTotal),
		metricHaproxySessionsAverage: newMetricHaproxySessionsAverage(mbc.Metrics.HaproxySessionsAverage),
		metricHaproxySessionsCount: newMetricHaproxySessionsCount(mbc.Metrics.HaproxySessionsCount),
		metricHaproxySessionsRate: newMetricHaproxySessionsRate(mbc.Metrics.HaproxySessionsRate),
		metricHaproxySessionsTotal: newMetricHaproxySessionsTotal(mbc.Metrics.HaproxySessionsTotal),
	}
	for _, op := range options {
		op(mb)
	}
	return mb
}
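
// Typical construction and recording flow from a scraper (a minimal sketch; mbc, settings, and the
// error handling around the Record call are placeholders, not part of this file):
//
//	mb := NewMetricsBuilder(mbc, settings)
//	now := pcommon.NewTimestampFromTime(time.Now())
//	if err := mb.RecordHaproxySessionsCountDataPoint(now, "42"); err != nil {
//		// handle the parse error
//	}
//	metrics := mb.Emit()
//
// Each Record*DataPoint call buffers a data point on the corresponding metric; Emit (defined below)
// returns everything accumulated since the previous Emit and resets the internal buffer.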
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity updates the max length of metrics and resource attributes that will be used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
	}
}

// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption func(pmetric.ResourceMetrics)

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	}
}
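
// Illustrative use of WithResource together with the generated ResourceBuilder (a sketch; the
// specific Set* setter shown is an assumption, since the available setters depend on the resource
// attributes defined in metadata, and rb.Emit() is assumed to return the built pcommon.Resource):
//
//	rb := mb.NewResourceBuilder()
//	rb.SetHaproxyAddr("127.0.0.1:8404") // hypothetical setter and value
//	mb.EmitForResource(WithResource(rb.Emit()))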
// WithStartTimeOverride overrides the start time for all the resource metrics data points.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	}
}
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required;
// the `Emit` function can be called instead.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName("otelcol/haproxyreceiver")
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	mb.metricHaproxyBytesInput.emit(ils.Metrics())
	mb.metricHaproxyBytesOutput.emit(ils.Metrics())
	mb.metricHaproxyClientsCanceled.emit(ils.Metrics())
	mb.metricHaproxyCompressionBypass.emit(ils.Metrics())
	mb.metricHaproxyCompressionCount.emit(ils.Metrics())
	mb.metricHaproxyCompressionInput.emit(ils.Metrics())
	mb.metricHaproxyCompressionOutput.emit(ils.Metrics())
	mb.metricHaproxyConnectionsErrors.emit(ils.Metrics())
	mb.metricHaproxyConnectionsRate.emit(ils.Metrics())
	mb.metricHaproxyConnectionsRetries.emit(ils.Metrics())
	mb.metricHaproxyConnectionsTotal.emit(ils.Metrics())
	mb.metricHaproxyDowntime.emit(ils.Metrics())
	mb.metricHaproxyFailedChecks.emit(ils.Metrics())
	mb.metricHaproxyRequestsDenied.emit(ils.Metrics())
	mb.metricHaproxyRequestsErrors.emit(ils.Metrics())
	mb.metricHaproxyRequestsQueued.emit(ils.Metrics())
	mb.metricHaproxyRequestsRate.emit(ils.Metrics())
	mb.metricHaproxyRequestsRedispatched.emit(ils.Metrics())
	mb.metricHaproxyRequestsTotal.emit(ils.Metrics())
	mb.metricHaproxyResponsesDenied.emit(ils.Metrics())
	mb.metricHaproxyResponsesErrors.emit(ils.Metrics())
	mb.metricHaproxyServerSelectedTotal.emit(ils.Metrics())
	mb.metricHaproxySessionsAverage.emit(ils.Metrics())
	mb.metricHaproxySessionsCount.emit(ils.Metrics())
	mb.metricHaproxySessionsRate.emit(ils.Metrics())
	mb.metricHaproxySessionsTotal.emit(ils.Metrics())

	for _, op := range rmo {
		op(rm)
	}
	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}
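
// Sketch of a scraper emitting one resource per monitored proxy (proxies, its fields, and the
// resource-attribute setters are hypothetical; only the Record/EmitForResource/Emit sequence
// mirrors this file):
//
//	now := pcommon.NewTimestampFromTime(time.Now())
//	for _, proxy := range proxies {
//		_ = mb.RecordHaproxySessionsCountDataPoint(now, proxy.SessionsCount)
//		rb := mb.NewResourceBuilder()
//		// ... set resource attributes identifying the proxy ...
//		mb.EmitForResource(WithResource(rb.Emit()))
//	}
//	metrics := mb.Emit()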
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(rmo...)
	metrics := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return metrics
}
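
// For the common single-resource case, a scraper can skip EmitForResource and return Emit directly
// (a sketch; the scraper type and its fields are hypothetical):
//
//	func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
//		now := pcommon.NewTimestampFromTime(time.Now())
//		_ = s.mb.RecordHaproxySessionsRateDataPoint(now, "3.5")
//		return s.mb.Emit(), nil
//	}
//
// Emit calls EmitForResource internally, so any ResourceMetricsOption arguments are applied to the
// final resource before the buffered metrics are returned and the buffer is reset.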
// RecordHaproxyBytesInputDataPoint adds a data point to haproxy.bytes.input metric.
func (mb *MetricsBuilder) RecordHaproxyBytesInputDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyBytesInput, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyBytesInput.recordDataPoint(mb.startTime, ts, val)
	return nil
}
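
// The Record*DataPoint helpers below all follow the same pattern as RecordHaproxyBytesInputDataPoint:
// the raw HAProxy stats field arrives as a string, is parsed with strconv, and is recorded only when
// parsing succeeds. For example (illustrative inputs):
//
//	_ = mb.RecordHaproxyBytesInputDataPoint(now, "1024")       // records 1024
//	err := mb.RecordHaproxyBytesInputDataPoint(now, "not-int") // returns a parse error, records nothing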
// RecordHaproxyBytesOutputDataPoint adds a data point to haproxy.bytes.output metric.
func (mb *MetricsBuilder) RecordHaproxyBytesOutputDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyBytesOutput, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyBytesOutput.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyClientsCanceledDataPoint adds a data point to haproxy.clients.canceled metric.
func (mb *MetricsBuilder) RecordHaproxyClientsCanceledDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyClientsCanceled, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyClientsCanceled.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyCompressionBypassDataPoint adds a data point to haproxy.compression.bypass metric.
func (mb *MetricsBuilder) RecordHaproxyCompressionBypassDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyCompressionBypass, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyCompressionBypass.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyCompressionCountDataPoint adds a data point to haproxy.compression.count metric.
func (mb *MetricsBuilder) RecordHaproxyCompressionCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyCompressionCount, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyCompressionCount.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyCompressionInputDataPoint adds a data point to haproxy.compression.input metric.
func (mb *MetricsBuilder) RecordHaproxyCompressionInputDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyCompressionInput, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyCompressionInput.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyCompressionOutputDataPoint adds a data point to haproxy.compression.output metric.
func (mb *MetricsBuilder) RecordHaproxyCompressionOutputDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyCompressionOutput, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyCompressionOutput.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyConnectionsErrorsDataPoint adds a data point to haproxy.connections.errors metric.
func (mb *MetricsBuilder) RecordHaproxyConnectionsErrorsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyConnectionsErrors, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyConnectionsErrors.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyConnectionsRateDataPoint adds a data point to haproxy.connections.rate metric.
func (mb *MetricsBuilder) RecordHaproxyConnectionsRateDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyConnectionsRate, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyConnectionsRate.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyConnectionsRetriesDataPoint adds a data point to haproxy.connections.retries metric.
func (mb *MetricsBuilder) RecordHaproxyConnectionsRetriesDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyConnectionsRetries, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyConnectionsRetries.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyConnectionsTotalDataPoint adds a data point to haproxy.connections.total metric.
func (mb *MetricsBuilder) RecordHaproxyConnectionsTotalDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyConnectionsTotal, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyConnectionsTotal.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyDowntimeDataPoint adds a data point to haproxy.downtime metric.
func (mb *MetricsBuilder) RecordHaproxyDowntimeDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyDowntime, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyDowntime.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyFailedChecksDataPoint adds a data point to haproxy.failed_checks metric.
func (mb *MetricsBuilder) RecordHaproxyFailedChecksDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyFailedChecks, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyFailedChecks.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyRequestsDeniedDataPoint adds a data point to haproxy.requests.denied metric.
func (mb *MetricsBuilder) RecordHaproxyRequestsDeniedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyRequestsDenied, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyRequestsDenied.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyRequestsErrorsDataPoint adds a data point to haproxy.requests.errors metric.
func (mb *MetricsBuilder) RecordHaproxyRequestsErrorsDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyRequestsErrors, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyRequestsErrors.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyRequestsQueuedDataPoint adds a data point to haproxy.requests.queued metric.
func (mb *MetricsBuilder) RecordHaproxyRequestsQueuedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyRequestsQueued, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyRequestsQueued.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyRequestsRateDataPoint adds a data point to haproxy.requests.rate metric.
func (mb *MetricsBuilder) RecordHaproxyRequestsRateDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseFloat(inputVal, 64)
	if err != nil {
		return fmt.Errorf("failed to parse float64 for HaproxyRequestsRate, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyRequestsRate.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyRequestsRedispatchedDataPoint adds a data point to haproxy.requests.redispatched metric.
func (mb *MetricsBuilder) RecordHaproxyRequestsRedispatchedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyRequestsRedispatched, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyRequestsRedispatched.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyRequestsTotalDataPoint adds a data point to haproxy.requests.total metric.
func (mb *MetricsBuilder) RecordHaproxyRequestsTotalDataPoint(ts pcommon.Timestamp, inputVal string, statusCodeAttributeValue AttributeStatusCode) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyRequestsTotal, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyRequestsTotal.recordDataPoint(mb.startTime, ts, val, statusCodeAttributeValue.String())
	return nil
}
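
// RecordHaproxyRequestsTotalDataPoint additionally takes a status-code attribute; the
// AttributeStatusCode value is converted to its string form and attached to the recorded data point.
// Illustrative call (the exact enum constant shown is an assumption; the constants are defined
// elsewhere in this generated file):
//
//	_ = mb.RecordHaproxyRequestsTotalDataPoint(now, "15", AttributeStatusCode2xx)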
// RecordHaproxyResponsesDeniedDataPoint adds a data point to haproxy.responses.denied metric.
func (mb *MetricsBuilder) RecordHaproxyResponsesDeniedDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyResponsesDenied, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyResponsesDenied.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxyResponsesErrorsDataPoint adds a data point to haproxy.responses.errors metric.
func (mb *MetricsBuilder) RecordHaproxyResponsesErrorsDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricHaproxyResponsesErrors.recordDataPoint(mb.startTime, ts, val)
}

// RecordHaproxyServerSelectedTotalDataPoint adds a data point to haproxy.server_selected.total metric.
func (mb *MetricsBuilder) RecordHaproxyServerSelectedTotalDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxyServerSelectedTotal, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxyServerSelectedTotal.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxySessionsAverageDataPoint adds a data point to haproxy.sessions.average metric.
func (mb *MetricsBuilder) RecordHaproxySessionsAverageDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseFloat(inputVal, 64)
	if err != nil {
		return fmt.Errorf("failed to parse float64 for HaproxySessionsAverage, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxySessionsAverage.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxySessionsCountDataPoint adds a data point to haproxy.sessions.count metric.
func (mb *MetricsBuilder) RecordHaproxySessionsCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxySessionsCount, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxySessionsCount.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxySessionsRateDataPoint adds a data point to haproxy.sessions.rate metric.
func (mb *MetricsBuilder) RecordHaproxySessionsRateDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseFloat(inputVal, 64)
	if err != nil {
		return fmt.Errorf("failed to parse float64 for HaproxySessionsRate, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxySessionsRate.recordDataPoint(mb.startTime, ts, val)
	return nil
}

// RecordHaproxySessionsTotalDataPoint adds a data point to haproxy.sessions.total metric.
func (mb *MetricsBuilder) RecordHaproxySessionsTotalDataPoint(ts pcommon.Timestamp, inputVal string) error {
	val, err := strconv.ParseInt(inputVal, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse int64 for HaproxySessionsTotal, value was %s: %w", inputVal, err)
	}
	mb.metricHaproxySessionsTotal.recordDataPoint(mb.startTime, ts, val)
	return nil
}
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// and the metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, op := range options {
		op(mb)
	}
}
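
// Illustrative use of Reset (a sketch; the restart-detection condition and restartTime are
// hypothetical):
//
//	if haproxyWasRestarted {
//		mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(restartTime)))
//	}
//
// Resetting refreshes startTime, which is then applied as the start timestamp of all data points
// recorded afterwards.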