- // Code generated by mdatagen. DO NOT EDIT.
- package metadata
- import (
- "fmt"
- "strconv"
- "time"
- "go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/pmetric"
- "go.opentelemetry.io/collector/receiver"
- )
- // AttributeStatusCode specifies a value for the status_code attribute.
- type AttributeStatusCode int
- const (
- _ AttributeStatusCode = iota
- AttributeStatusCode1xx
- AttributeStatusCode2xx
- AttributeStatusCode3xx
- AttributeStatusCode4xx
- AttributeStatusCode5xx
- AttributeStatusCodeOther
- )
- // String returns the string representation of the AttributeStatusCode.
- func (av AttributeStatusCode) String() string {
- switch av {
- case AttributeStatusCode1xx:
- return "1xx"
- case AttributeStatusCode2xx:
- return "2xx"
- case AttributeStatusCode3xx:
- return "3xx"
- case AttributeStatusCode4xx:
- return "4xx"
- case AttributeStatusCode5xx:
- return "5xx"
- case AttributeStatusCodeOther:
- return "other"
- }
- return ""
- }
- // MapAttributeStatusCode is a helper map of string to AttributeStatusCode attribute value.
- var MapAttributeStatusCode = map[string]AttributeStatusCode{
- "1xx": AttributeStatusCode1xx,
- "2xx": AttributeStatusCode2xx,
- "3xx": AttributeStatusCode3xx,
- "4xx": AttributeStatusCode4xx,
- "5xx": AttributeStatusCode5xx,
- "other": AttributeStatusCodeOther,
- }
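- // Illustrative sketch (not part of the generated file): the enum above, its String
- // method, and MapAttributeStatusCode are typically used together to translate a raw
- // status-code bucket read from the HAProxy stats output into the typed attribute
- // value and back. The helper name below is hypothetical.
- //
- //	func statusCodeAttribute(raw string) (string, bool) {
- //		ac, ok := MapAttributeStatusCode[raw] // e.g. "2xx" -> AttributeStatusCode2xx
- //		if !ok {
- //			return "", false // unknown bucket; the caller decides how to handle it
- //		}
- //		return ac.String(), true // canonical string written to the data point attribute
- //	}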
- type metricHaproxyBytesInput struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.bytes.input metric with initial data.
- func (m *metricHaproxyBytesInput) init() {
- m.data.SetName("haproxy.bytes.input")
- m.data.SetDescription("Bytes in. Corresponds to HAProxy's `bin` metric.")
- m.data.SetUnit("by")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyBytesInput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyBytesInput) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyBytesInput) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyBytesInput(cfg MetricConfig) metricHaproxyBytesInput {
- m := metricHaproxyBytesInput{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
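- // Illustrative sketch (not part of the generated file): every metricHaproxy* type in
- // this file follows the same lifecycle. A scraper constructs it from its MetricConfig,
- // records one or more data points per scrape, and emit moves the accumulated points
- // into the output slice while re-initializing the internal buffer. The MetricConfig
- // literal and timestamps below are assumptions for the sketch.
- //
- //	cfg := MetricConfig{Enabled: true} // assumed shape; defined in the generated config file
- //	m := newMetricHaproxyBytesInput(cfg)
- //	now := pcommon.NewTimestampFromTime(time.Now())
- //	m.recordDataPoint(now, now, 1024) // one cumulative bytes-in sample
- //	metrics := pmetric.NewMetricSlice()
- //	m.emit(metrics) // appends the metric and resets the buffer for the next scrape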
- type metricHaproxyBytesOutput struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.bytes.output metric with initial data.
- func (m *metricHaproxyBytesOutput) init() {
- m.data.SetName("haproxy.bytes.output")
- m.data.SetDescription("Bytes out. Corresponds to HAProxy's `bout` metric.")
- m.data.SetUnit("by")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyBytesOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyBytesOutput) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyBytesOutput) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyBytesOutput(cfg MetricConfig) metricHaproxyBytesOutput {
- m := metricHaproxyBytesOutput{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyClientsCanceled struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.clients.canceled metric with initial data.
- func (m *metricHaproxyClientsCanceled) init() {
- m.data.SetName("haproxy.clients.canceled")
- m.data.SetDescription("Number of data transfers aborted by the client. Corresponds to HAProxy's `cli_abrt` metric")
- m.data.SetUnit("{cancellations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyClientsCanceled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyClientsCanceled) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyClientsCanceled) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyClientsCanceled(cfg MetricConfig) metricHaproxyClientsCanceled {
- m := metricHaproxyClientsCanceled{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyCompressionBypass struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.compression.bypass metric with initial data.
- func (m *metricHaproxyCompressionBypass) init() {
- m.data.SetName("haproxy.compression.bypass")
- m.data.SetDescription("Number of bytes that bypassed the HTTP compressor (CPU/BW limit). Corresponds to HAProxy's `comp_byp` metric.")
- m.data.SetUnit("by")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyCompressionBypass) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyCompressionBypass) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyCompressionBypass) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyCompressionBypass(cfg MetricConfig) metricHaproxyCompressionBypass {
- m := metricHaproxyCompressionBypass{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyCompressionCount struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.compression.count metric with initial data.
- func (m *metricHaproxyCompressionCount) init() {
- m.data.SetName("haproxy.compression.count")
- m.data.SetDescription("Number of HTTP responses that were compressed. Corresponds to HAProxy's `comp_rsp` metric.")
- m.data.SetUnit("{responses}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyCompressionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyCompressionCount) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyCompressionCount) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyCompressionCount(cfg MetricConfig) metricHaproxyCompressionCount {
- m := metricHaproxyCompressionCount{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyCompressionInput struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.compression.input metric with initial data.
- func (m *metricHaproxyCompressionInput) init() {
- m.data.SetName("haproxy.compression.input")
- m.data.SetDescription("Number of HTTP response bytes fed to the compressor. Corresponds to HAProxy's `comp_in` metric.")
- m.data.SetUnit("by")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyCompressionInput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyCompressionInput) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyCompressionInput) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyCompressionInput(cfg MetricConfig) metricHaproxyCompressionInput {
- m := metricHaproxyCompressionInput{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyCompressionOutput struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.compression.output metric with initial data.
- func (m *metricHaproxyCompressionOutput) init() {
- m.data.SetName("haproxy.compression.output")
- m.data.SetDescription("Number of HTTP response bytes emitted by the compressor. Corresponds to HAProxy's `comp_out` metric.")
- m.data.SetUnit("by")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyCompressionOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyCompressionOutput) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyCompressionOutput) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyCompressionOutput(cfg MetricConfig) metricHaproxyCompressionOutput {
- m := metricHaproxyCompressionOutput{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyConnectionsErrors struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.connections.errors metric with initial data.
- func (m *metricHaproxyConnectionsErrors) init() {
- m.data.SetName("haproxy.connections.errors")
- m.data.SetDescription("Number of requests that encountered an error trying to connect to a backend server. The backend stat is the sum of the stat. Corresponds to HAProxy's `econ` metric")
- m.data.SetUnit("{errors}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyConnectionsErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyConnectionsErrors) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyConnectionsErrors) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyConnectionsErrors(cfg MetricConfig) metricHaproxyConnectionsErrors {
- m := metricHaproxyConnectionsErrors{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyConnectionsRate struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.connections.rate metric with initial data.
- func (m *metricHaproxyConnectionsRate) init() {
- m.data.SetName("haproxy.connections.rate")
- m.data.SetDescription("Number of connections over the last elapsed second (frontend). Corresponds to HAProxy's `conn_rate` metric.")
- m.data.SetUnit("{connections}")
- m.data.SetEmptyGauge()
- }
- func (m *metricHaproxyConnectionsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyConnectionsRate) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyConnectionsRate) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyConnectionsRate(cfg MetricConfig) metricHaproxyConnectionsRate {
- m := metricHaproxyConnectionsRate{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyConnectionsRetries struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.connections.retries metric with initial data.
- func (m *metricHaproxyConnectionsRetries) init() {
- m.data.SetName("haproxy.connections.retries")
- m.data.SetDescription("Number of times a connection to a server was retried. Corresponds to HAProxy's `wretr` metric.")
- m.data.SetUnit("{retries}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyConnectionsRetries) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyConnectionsRetries) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyConnectionsRetries) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyConnectionsRetries(cfg MetricConfig) metricHaproxyConnectionsRetries {
- m := metricHaproxyConnectionsRetries{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyConnectionsTotal struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.connections.total metric with initial data.
- func (m *metricHaproxyConnectionsTotal) init() {
- m.data.SetName("haproxy.connections.total")
- m.data.SetDescription("Cumulative number of connections (frontend). Corresponds to HAProxy's `conn_tot` metric.")
- m.data.SetUnit("{connections}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyConnectionsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyConnectionsTotal) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyConnectionsTotal) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyConnectionsTotal(cfg MetricConfig) metricHaproxyConnectionsTotal {
- m := metricHaproxyConnectionsTotal{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyDowntime struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.downtime metric with initial data.
- func (m *metricHaproxyDowntime) init() {
- m.data.SetName("haproxy.downtime")
- m.data.SetDescription("Total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime. Corresponds to HAProxy's `downtime` metric")
- m.data.SetUnit("s")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyDowntime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyDowntime) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyDowntime) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyDowntime(cfg MetricConfig) metricHaproxyDowntime {
- m := metricHaproxyDowntime{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyFailedChecks struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.failed_checks metric with initial data.
- func (m *metricHaproxyFailedChecks) init() {
- m.data.SetName("haproxy.failed_checks")
- m.data.SetDescription("Number of failed checks. (Only counts checks failed when the server is up). Corresponds to HAProxy's `chkfail` metric.")
- m.data.SetUnit("{checks}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyFailedChecks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyFailedChecks) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyFailedChecks) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyFailedChecks(cfg MetricConfig) metricHaproxyFailedChecks {
- m := metricHaproxyFailedChecks{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyRequestsDenied struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.requests.denied metric with initial data.
- func (m *metricHaproxyRequestsDenied) init() {
- m.data.SetName("haproxy.requests.denied")
- m.data.SetDescription("Requests denied because of security concerns. Corresponds to HAProxy's `dreq` metric")
- m.data.SetUnit("{requests}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyRequestsDenied) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyRequestsDenied) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyRequestsDenied) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyRequestsDenied(cfg MetricConfig) metricHaproxyRequestsDenied {
- m := metricHaproxyRequestsDenied{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyRequestsErrors struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.requests.errors metric with initial data.
- func (m *metricHaproxyRequestsErrors) init() {
- m.data.SetName("haproxy.requests.errors")
- m.data.SetDescription("Cumulative number of request errors. Corresponds to HAProxy's `ereq` metric.")
- m.data.SetUnit("{errors}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyRequestsErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyRequestsErrors) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyRequestsErrors) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyRequestsErrors(cfg MetricConfig) metricHaproxyRequestsErrors {
- m := metricHaproxyRequestsErrors{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyRequestsQueued struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.requests.queued metric with initial data.
- func (m *metricHaproxyRequestsQueued) init() {
- m.data.SetName("haproxy.requests.queued")
- m.data.SetDescription("Current queued requests. For the backend this reports the number queued without a server assigned. Corresponds to HAProxy's `qcur` metric.")
- m.data.SetUnit("{requests}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyRequestsQueued) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyRequestsQueued) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyRequestsQueued) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyRequestsQueued(cfg MetricConfig) metricHaproxyRequestsQueued {
- m := metricHaproxyRequestsQueued{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyRequestsRate struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.requests.rate metric with initial data.
- func (m *metricHaproxyRequestsRate) init() {
- m.data.SetName("haproxy.requests.rate")
- m.data.SetDescription("HTTP requests per second over last elapsed second. Corresponds to HAProxy's `req_rate` metric.")
- m.data.SetUnit("{requests}")
- m.data.SetEmptyGauge()
- }
- func (m *metricHaproxyRequestsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetDoubleValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyRequestsRate) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyRequestsRate) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyRequestsRate(cfg MetricConfig) metricHaproxyRequestsRate {
- m := metricHaproxyRequestsRate{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyRequestsRedispatched struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.requests.redispatched metric with initial data.
- func (m *metricHaproxyRequestsRedispatched) init() {
- m.data.SetName("haproxy.requests.redispatched")
- m.data.SetDescription("Number of times a request was redispatched to another server. Corresponds to HAProxy's `wredis` metric.")
- m.data.SetUnit("{requests}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyRequestsRedispatched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyRequestsRedispatched) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyRequestsRedispatched) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyRequestsRedispatched(cfg MetricConfig) metricHaproxyRequestsRedispatched {
- m := metricHaproxyRequestsRedispatched{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyRequestsTotal struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.requests.total metric with initial data.
- func (m *metricHaproxyRequestsTotal) init() {
- m.data.SetName("haproxy.requests.total")
- m.data.SetDescription("Total number of HTTP requests received. Corresponds to HAProxy's `req_tot`, `hrsp_1xx`, `hrsp_2xx`, `hrsp_3xx`, `hrsp_4xx`, `hrsp_5xx` and `hrsp_other` metrics.")
- m.data.SetUnit("{requests}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
- }
- func (m *metricHaproxyRequestsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, statusCodeAttributeValue string) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("status_code", statusCodeAttributeValue)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyRequestsTotal) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyRequestsTotal) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyRequestsTotal(cfg MetricConfig) metricHaproxyRequestsTotal {
- m := metricHaproxyRequestsTotal{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyResponsesDenied struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.responses.denied metric with initial data.
- func (m *metricHaproxyResponsesDenied) init() {
- m.data.SetName("haproxy.responses.denied")
- m.data.SetDescription("Responses denied because of security concerns. Corresponds to HAProxy's `dresp` metric")
- m.data.SetUnit("{responses}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyResponsesDenied) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyResponsesDenied) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyResponsesDenied) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyResponsesDenied(cfg MetricConfig) metricHaproxyResponsesDenied {
- m := metricHaproxyResponsesDenied{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyResponsesErrors struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.responses.errors metric with initial data.
- func (m *metricHaproxyResponsesErrors) init() {
- m.data.SetName("haproxy.responses.errors")
- m.data.SetDescription("Cumulative number of response errors. Corresponds to HAProxy's `eresp` metric, `srv_abrt` will be counted here also.")
- m.data.SetUnit("{errors}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyResponsesErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyResponsesErrors) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyResponsesErrors) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyResponsesErrors(cfg MetricConfig) metricHaproxyResponsesErrors {
- m := metricHaproxyResponsesErrors{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxyServerSelectedTotal struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.server_selected.total metric with initial data.
- func (m *metricHaproxyServerSelectedTotal) init() {
- m.data.SetName("haproxy.server_selected.total")
- m.data.SetDescription("Number of times a server was selected, either for new sessions or when re-dispatching. Corresponds to HAProxy's `lbtot` metric.")
- m.data.SetUnit("{selections}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxyServerSelectedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxyServerSelectedTotal) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxyServerSelectedTotal) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxyServerSelectedTotal(cfg MetricConfig) metricHaproxyServerSelectedTotal {
- m := metricHaproxyServerSelectedTotal{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxySessionsAverage struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.sessions.average metric with initial data.
- func (m *metricHaproxySessionsAverage) init() {
- m.data.SetName("haproxy.sessions.average")
- m.data.SetDescription("Average total session time in ms over the last 1024 requests. Corresponds to HAProxy's `ttime` metric.")
- m.data.SetUnit("ms")
- m.data.SetEmptyGauge()
- }
- func (m *metricHaproxySessionsAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetDoubleValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxySessionsAverage) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxySessionsAverage) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxySessionsAverage(cfg MetricConfig) metricHaproxySessionsAverage {
- m := metricHaproxySessionsAverage{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxySessionsCount struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.sessions.count metric with initial data.
- func (m *metricHaproxySessionsCount) init() {
- m.data.SetName("haproxy.sessions.count")
- m.data.SetDescription("Current sessions. Corresponds to HAProxy's `scur` metric.")
- m.data.SetUnit("{sessions}")
- m.data.SetEmptyGauge()
- }
- func (m *metricHaproxySessionsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxySessionsCount) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxySessionsCount) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxySessionsCount(cfg MetricConfig) metricHaproxySessionsCount {
- m := metricHaproxySessionsCount{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxySessionsRate struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.sessions.rate metric with initial data.
- func (m *metricHaproxySessionsRate) init() {
- m.data.SetName("haproxy.sessions.rate")
- m.data.SetDescription("Number of sessions per second over last elapsed second. Corresponds to HAProxy's `rate` metric.")
- m.data.SetUnit("{sessions}")
- m.data.SetEmptyGauge()
- }
- func (m *metricHaproxySessionsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Gauge().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetDoubleValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxySessionsRate) updateCapacity() {
- if m.data.Gauge().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Gauge().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxySessionsRate) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxySessionsRate(cfg MetricConfig) metricHaproxySessionsRate {
- m := metricHaproxySessionsRate{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- type metricHaproxySessionsTotal struct {
- data pmetric.Metric // data buffer for generated metric.
- config MetricConfig // metric config provided by user.
- capacity int // max observed number of data points added to the metric.
- }
- // init fills haproxy.sessions.total metric with initial data.
- func (m *metricHaproxySessionsTotal) init() {
- m.data.SetName("haproxy.sessions.total")
- m.data.SetDescription("Cumulative number of sessions. Corresponds to HAProxy's `stot` metric.")
- m.data.SetUnit("{sessions}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- }
- func (m *metricHaproxySessionsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.config.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- }
- // updateCapacity saves max length of data point slices that will be used for the slice capacity.
- func (m *metricHaproxySessionsTotal) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
- }
- // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
- func (m *metricHaproxySessionsTotal) emit(metrics pmetric.MetricSlice) {
- if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
- }
- func newMetricHaproxySessionsTotal(cfg MetricConfig) metricHaproxySessionsTotal {
- m := metricHaproxySessionsTotal{config: cfg}
- if cfg.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
- }
- // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
- // required to produce the metric representation defined in the metadata and user config.
- type MetricsBuilder struct {
- config MetricsBuilderConfig // config of the metrics builder.
- startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
- metricsCapacity int // maximum observed number of metrics per resource.
- metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
- buildInfo component.BuildInfo // contains version information.
- metricHaproxyBytesInput metricHaproxyBytesInput
- metricHaproxyBytesOutput metricHaproxyBytesOutput
- metricHaproxyClientsCanceled metricHaproxyClientsCanceled
- metricHaproxyCompressionBypass metricHaproxyCompressionBypass
- metricHaproxyCompressionCount metricHaproxyCompressionCount
- metricHaproxyCompressionInput metricHaproxyCompressionInput
- metricHaproxyCompressionOutput metricHaproxyCompressionOutput
- metricHaproxyConnectionsErrors metricHaproxyConnectionsErrors
- metricHaproxyConnectionsRate metricHaproxyConnectionsRate
- metricHaproxyConnectionsRetries metricHaproxyConnectionsRetries
- metricHaproxyConnectionsTotal metricHaproxyConnectionsTotal
- metricHaproxyDowntime metricHaproxyDowntime
- metricHaproxyFailedChecks metricHaproxyFailedChecks
- metricHaproxyRequestsDenied metricHaproxyRequestsDenied
- metricHaproxyRequestsErrors metricHaproxyRequestsErrors
- metricHaproxyRequestsQueued metricHaproxyRequestsQueued
- metricHaproxyRequestsRate metricHaproxyRequestsRate
- metricHaproxyRequestsRedispatched metricHaproxyRequestsRedispatched
- metricHaproxyRequestsTotal metricHaproxyRequestsTotal
- metricHaproxyResponsesDenied metricHaproxyResponsesDenied
- metricHaproxyResponsesErrors metricHaproxyResponsesErrors
- metricHaproxyServerSelectedTotal metricHaproxyServerSelectedTotal
- metricHaproxySessionsAverage metricHaproxySessionsAverage
- metricHaproxySessionsCount metricHaproxySessionsCount
- metricHaproxySessionsRate metricHaproxySessionsRate
- metricHaproxySessionsTotal metricHaproxySessionsTotal
- }
- // metricBuilderOption applies changes to the default metrics builder.
- type metricBuilderOption func(*MetricsBuilder)
- // WithStartTime sets startTime on the metrics builder.
- func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
- return func(mb *MetricsBuilder) {
- mb.startTime = startTime
- }
- }
- func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder {
- mb := &MetricsBuilder{
- config: mbc,
- startTime: pcommon.NewTimestampFromTime(time.Now()),
- metricsBuffer: pmetric.NewMetrics(),
- buildInfo: settings.BuildInfo,
- metricHaproxyBytesInput: newMetricHaproxyBytesInput(mbc.Metrics.HaproxyBytesInput),
- metricHaproxyBytesOutput: newMetricHaproxyBytesOutput(mbc.Metrics.HaproxyBytesOutput),
- metricHaproxyClientsCanceled: newMetricHaproxyClientsCanceled(mbc.Metrics.HaproxyClientsCanceled),
- metricHaproxyCompressionBypass: newMetricHaproxyCompressionBypass(mbc.Metrics.HaproxyCompressionBypass),
- metricHaproxyCompressionCount: newMetricHaproxyCompressionCount(mbc.Metrics.HaproxyCompressionCount),
- metricHaproxyCompressionInput: newMetricHaproxyCompressionInput(mbc.Metrics.HaproxyCompressionInput),
- metricHaproxyCompressionOutput: newMetricHaproxyCompressionOutput(mbc.Metrics.HaproxyCompressionOutput),
- metricHaproxyConnectionsErrors: newMetricHaproxyConnectionsErrors(mbc.Metrics.HaproxyConnectionsErrors),
- metricHaproxyConnectionsRate: newMetricHaproxyConnectionsRate(mbc.Metrics.HaproxyConnectionsRate),
- metricHaproxyConnectionsRetries: newMetricHaproxyConnectionsRetries(mbc.Metrics.HaproxyConnectionsRetries),
- metricHaproxyConnectionsTotal: newMetricHaproxyConnectionsTotal(mbc.Metrics.HaproxyConnectionsTotal),
- metricHaproxyDowntime: newMetricHaproxyDowntime(mbc.Metrics.HaproxyDowntime),
- metricHaproxyFailedChecks: newMetricHaproxyFailedChecks(mbc.Metrics.HaproxyFailedChecks),
- metricHaproxyRequestsDenied: newMetricHaproxyRequestsDenied(mbc.Metrics.HaproxyRequestsDenied),
- metricHaproxyRequestsErrors: newMetricHaproxyRequestsErrors(mbc.Metrics.HaproxyRequestsErrors),
- metricHaproxyRequestsQueued: newMetricHaproxyRequestsQueued(mbc.Metrics.HaproxyRequestsQueued),
- metricHaproxyRequestsRate: newMetricHaproxyRequestsRate(mbc.Metrics.HaproxyRequestsRate),
- metricHaproxyRequestsRedispatched: newMetricHaproxyRequestsRedispatched(mbc.Metrics.HaproxyRequestsRedispatched),
- metricHaproxyRequestsTotal: newMetricHaproxyRequestsTotal(mbc.Metrics.HaproxyRequestsTotal),
- metricHaproxyResponsesDenied: newMetricHaproxyResponsesDenied(mbc.Metrics.HaproxyResponsesDenied),
- metricHaproxyResponsesErrors: newMetricHaproxyResponsesErrors(mbc.Metrics.HaproxyResponsesErrors),
- metricHaproxyServerSelectedTotal: newMetricHaproxyServerSelectedTotal(mbc.Metrics.HaproxyServerSelectedTotal),
- metricHaproxySessionsAverage: newMetricHaproxySessionsAverage(mbc.Metrics.HaproxySessionsAverage),
- metricHaproxySessionsCount: newMetricHaproxySessionsCount(mbc.Metrics.HaproxySessionsCount),
- metricHaproxySessionsRate: newMetricHaproxySessionsRate(mbc.Metrics.HaproxySessionsRate),
- metricHaproxySessionsTotal: newMetricHaproxySessionsTotal(mbc.Metrics.HaproxySessionsTotal),
- }
- for _, op := range options {
- op(mb)
- }
- return mb
- }
- // NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
- func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
- return NewResourceBuilder(mb.config.ResourceAttributes)
- }
- // updateCapacity updates the maximum observed number of metrics per resource, which is used as the slice capacity on subsequent emits.
- func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
- if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
- mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
- }
- }
- // ResourceMetricsOption applies changes to the provided resource metrics.
- type ResourceMetricsOption func(pmetric.ResourceMetrics)
- // WithResource sets the provided resource on the emitted ResourceMetrics.
- // It's recommended to use ResourceBuilder to create the resource.
- func WithResource(res pcommon.Resource) ResourceMetricsOption {
- return func(rm pmetric.ResourceMetrics) {
- res.CopyTo(rm.Resource())
- }
- }
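- // Illustrative sketch (editor's example): building a resource with the generated
- // ResourceBuilder and attaching it to an EmitForResource call, as recommended above.
- // SetHaproxyAddr is a hypothetical setter name inferred from the receiver's resource
- // attributes, and rb.Emit() returning a pcommon.Resource follows the usual mdatagen
- // ResourceBuilder shape; verify both against the generated resource builder.
- func exampleEmitWithResource(mb *MetricsBuilder, addr string) {
-     rb := mb.NewResourceBuilder()
-     rb.SetHaproxyAddr(addr) // hypothetical setter; one exists per resource attribute
-     mb.EmitForResource(WithResource(rb.Emit()))
- }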
- // WithStartTimeOverride overrides the start time for all the resource metrics data points.
- // This option should only be used if a different start time has to be set on metrics coming from different resources.
- func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
- return func(rm pmetric.ResourceMetrics) {
- var dps pmetric.NumberDataPointSlice
- metrics := rm.ScopeMetrics().At(0).Metrics()
- for i := 0; i < metrics.Len(); i++ {
- switch metrics.At(i).Type() {
- case pmetric.MetricTypeGauge:
- dps = metrics.At(i).Gauge().DataPoints()
- case pmetric.MetricTypeSum:
- dps = metrics.At(i).Sum().DataPoints()
- }
- for j := 0; j < dps.Len(); j++ {
- dps.At(j).SetStartTimestamp(start)
- }
- }
- }
- }
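- // Illustrative sketch (editor's example): overriding the start timestamp for a single
- // resource's data points, e.g. when each scraped proxy reports its own restart time.
- // proxyStart is a stand-in for that per-resource value.
- func exampleEmitWithStartTimeOverride(mb *MetricsBuilder, res pcommon.Resource, proxyStart time.Time) {
-     mb.EmitForResource(
-         WithResource(res),
-         WithStartTimeOverride(pcommon.NewTimestampFromTime(proxyStart)),
-     )
- }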
- // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
- // recording another set of data points as part of another resource. This function can be helpful when one scraper
- // needs to emit metrics from several resources. Otherwise, calling this function is not required;
- // the `Emit` function can be called instead.
- // Resource attributes should be provided as ResourceMetricsOption arguments.
- func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
- rm := pmetric.NewResourceMetrics()
- ils := rm.ScopeMetrics().AppendEmpty()
- ils.Scope().SetName("otelcol/haproxyreceiver")
- ils.Scope().SetVersion(mb.buildInfo.Version)
- ils.Metrics().EnsureCapacity(mb.metricsCapacity)
- mb.metricHaproxyBytesInput.emit(ils.Metrics())
- mb.metricHaproxyBytesOutput.emit(ils.Metrics())
- mb.metricHaproxyClientsCanceled.emit(ils.Metrics())
- mb.metricHaproxyCompressionBypass.emit(ils.Metrics())
- mb.metricHaproxyCompressionCount.emit(ils.Metrics())
- mb.metricHaproxyCompressionInput.emit(ils.Metrics())
- mb.metricHaproxyCompressionOutput.emit(ils.Metrics())
- mb.metricHaproxyConnectionsErrors.emit(ils.Metrics())
- mb.metricHaproxyConnectionsRate.emit(ils.Metrics())
- mb.metricHaproxyConnectionsRetries.emit(ils.Metrics())
- mb.metricHaproxyConnectionsTotal.emit(ils.Metrics())
- mb.metricHaproxyDowntime.emit(ils.Metrics())
- mb.metricHaproxyFailedChecks.emit(ils.Metrics())
- mb.metricHaproxyRequestsDenied.emit(ils.Metrics())
- mb.metricHaproxyRequestsErrors.emit(ils.Metrics())
- mb.metricHaproxyRequestsQueued.emit(ils.Metrics())
- mb.metricHaproxyRequestsRate.emit(ils.Metrics())
- mb.metricHaproxyRequestsRedispatched.emit(ils.Metrics())
- mb.metricHaproxyRequestsTotal.emit(ils.Metrics())
- mb.metricHaproxyResponsesDenied.emit(ils.Metrics())
- mb.metricHaproxyResponsesErrors.emit(ils.Metrics())
- mb.metricHaproxyServerSelectedTotal.emit(ils.Metrics())
- mb.metricHaproxySessionsAverage.emit(ils.Metrics())
- mb.metricHaproxySessionsCount.emit(ils.Metrics())
- mb.metricHaproxySessionsRate.emit(ils.Metrics())
- mb.metricHaproxySessionsTotal.emit(ils.Metrics())
- for _, op := range rmo {
- op(rm)
- }
- if ils.Metrics().Len() > 0 {
- mb.updateCapacity(rm)
- rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
- }
- }
- // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
- // recording another set of metrics. This function is responsible for applying all the transformations required to
- // produce the metric representation defined in metadata and user config, e.g. delta or cumulative.
- func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
- mb.EmitForResource(rmo...)
- metrics := mb.metricsBuffer
- mb.metricsBuffer = pmetric.NewMetrics()
- return metrics
- }
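- // Illustrative sketch (editor's example): the end-to-end flow a scraper typically follows
- // with this builder. The stats map is a stand-in for whatever the scraper parses from the
- // HAProxy stats endpoint ("bin" and "stot" are the upstream CSV field names for bytes in
- // and total sessions); the resource would normally come from a ResourceBuilder.
- func exampleScrape(mb *MetricsBuilder, stats map[string]string, res pcommon.Resource) (pmetric.Metrics, error) {
-     now := pcommon.NewTimestampFromTime(time.Now())
-     if err := mb.RecordHaproxyBytesInputDataPoint(now, stats["bin"]); err != nil {
-         return pmetric.Metrics{}, err
-     }
-     if err := mb.RecordHaproxySessionsTotalDataPoint(now, stats["stot"]); err != nil {
-         return pmetric.Metrics{}, err
-     }
-     // Emit flushes everything recorded so far under the given resource, returns the
-     // accumulated pmetric.Metrics, and resets the internal buffer for the next scrape.
-     return mb.Emit(WithResource(res)), nil
- }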
- // RecordHaproxyBytesInputDataPoint adds a data point to haproxy.bytes.input metric.
- func (mb *MetricsBuilder) RecordHaproxyBytesInputDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyBytesInput, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyBytesInput.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyBytesOutputDataPoint adds a data point to haproxy.bytes.output metric.
- func (mb *MetricsBuilder) RecordHaproxyBytesOutputDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyBytesOutput, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyBytesOutput.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyClientsCanceledDataPoint adds a data point to haproxy.clients.canceled metric.
- func (mb *MetricsBuilder) RecordHaproxyClientsCanceledDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyClientsCanceled, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyClientsCanceled.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyCompressionBypassDataPoint adds a data point to haproxy.compression.bypass metric.
- func (mb *MetricsBuilder) RecordHaproxyCompressionBypassDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyCompressionBypass, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyCompressionBypass.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyCompressionCountDataPoint adds a data point to haproxy.compression.count metric.
- func (mb *MetricsBuilder) RecordHaproxyCompressionCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyCompressionCount, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyCompressionCount.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyCompressionInputDataPoint adds a data point to haproxy.compression.input metric.
- func (mb *MetricsBuilder) RecordHaproxyCompressionInputDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyCompressionInput, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyCompressionInput.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyCompressionOutputDataPoint adds a data point to haproxy.compression.output metric.
- func (mb *MetricsBuilder) RecordHaproxyCompressionOutputDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyCompressionOutput, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyCompressionOutput.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyConnectionsErrorsDataPoint adds a data point to haproxy.connections.errors metric.
- func (mb *MetricsBuilder) RecordHaproxyConnectionsErrorsDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyConnectionsErrors, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyConnectionsErrors.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyConnectionsRateDataPoint adds a data point to haproxy.connections.rate metric.
- func (mb *MetricsBuilder) RecordHaproxyConnectionsRateDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyConnectionsRate, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyConnectionsRate.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyConnectionsRetriesDataPoint adds a data point to haproxy.connections.retries metric.
- func (mb *MetricsBuilder) RecordHaproxyConnectionsRetriesDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyConnectionsRetries, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyConnectionsRetries.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyConnectionsTotalDataPoint adds a data point to haproxy.connections.total metric.
- func (mb *MetricsBuilder) RecordHaproxyConnectionsTotalDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyConnectionsTotal, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyConnectionsTotal.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyDowntimeDataPoint adds a data point to haproxy.downtime metric.
- func (mb *MetricsBuilder) RecordHaproxyDowntimeDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyDowntime, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyDowntime.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyFailedChecksDataPoint adds a data point to haproxy.failed_checks metric.
- func (mb *MetricsBuilder) RecordHaproxyFailedChecksDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyFailedChecks, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyFailedChecks.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyRequestsDeniedDataPoint adds a data point to haproxy.requests.denied metric.
- func (mb *MetricsBuilder) RecordHaproxyRequestsDeniedDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyRequestsDenied, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyRequestsDenied.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyRequestsErrorsDataPoint adds a data point to haproxy.requests.errors metric.
- func (mb *MetricsBuilder) RecordHaproxyRequestsErrorsDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyRequestsErrors, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyRequestsErrors.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyRequestsQueuedDataPoint adds a data point to haproxy.requests.queued metric.
- func (mb *MetricsBuilder) RecordHaproxyRequestsQueuedDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyRequestsQueued, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyRequestsQueued.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyRequestsRateDataPoint adds a data point to haproxy.requests.rate metric.
- func (mb *MetricsBuilder) RecordHaproxyRequestsRateDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseFloat(inputVal, 64)
- if err != nil {
- return fmt.Errorf("failed to parse float64 for HaproxyRequestsRate, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyRequestsRate.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyRequestsRedispatchedDataPoint adds a data point to haproxy.requests.redispatched metric.
- func (mb *MetricsBuilder) RecordHaproxyRequestsRedispatchedDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyRequestsRedispatched, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyRequestsRedispatched.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyRequestsTotalDataPoint adds a data point to haproxy.requests.total metric.
- func (mb *MetricsBuilder) RecordHaproxyRequestsTotalDataPoint(ts pcommon.Timestamp, inputVal string, statusCodeAttributeValue AttributeStatusCode) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyRequestsTotal, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyRequestsTotal.recordDataPoint(mb.startTime, ts, val, statusCodeAttributeValue.String())
- return nil
- }
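- // Illustrative sketch (editor's example): haproxy.requests.total is the one metric in this
- // builder that carries a status_code attribute, so a scraper records one point per status
- // bucket. The AttributeStatusCode2xx/5xx constant names follow the usual mdatagen naming
- // but are assumptions here; check the generated enum for the exact identifiers.
- func exampleRecordRequestsByStatus(mb *MetricsBuilder, ts pcommon.Timestamp, hrsp2xx, hrsp5xx string) error {
-     if err := mb.RecordHaproxyRequestsTotalDataPoint(ts, hrsp2xx, AttributeStatusCode2xx); err != nil {
-         return err
-     }
-     return mb.RecordHaproxyRequestsTotalDataPoint(ts, hrsp5xx, AttributeStatusCode5xx)
- }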
- // RecordHaproxyResponsesDeniedDataPoint adds a data point to haproxy.responses.denied metric.
- func (mb *MetricsBuilder) RecordHaproxyResponsesDeniedDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyResponsesDenied, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyResponsesDenied.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxyResponsesErrorsDataPoint adds a data point to haproxy.responses.errors metric.
- func (mb *MetricsBuilder) RecordHaproxyResponsesErrorsDataPoint(ts pcommon.Timestamp, val int64) {
- mb.metricHaproxyResponsesErrors.recordDataPoint(mb.startTime, ts, val)
- }
- // RecordHaproxyServerSelectedTotalDataPoint adds a data point to haproxy.server_selected.total metric.
- func (mb *MetricsBuilder) RecordHaproxyServerSelectedTotalDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxyServerSelectedTotal, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxyServerSelectedTotal.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxySessionsAverageDataPoint adds a data point to haproxy.sessions.average metric.
- func (mb *MetricsBuilder) RecordHaproxySessionsAverageDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseFloat(inputVal, 64)
- if err != nil {
- return fmt.Errorf("failed to parse float64 for HaproxySessionsAverage, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxySessionsAverage.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxySessionsCountDataPoint adds a data point to haproxy.sessions.count metric.
- func (mb *MetricsBuilder) RecordHaproxySessionsCountDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxySessionsCount, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxySessionsCount.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxySessionsRateDataPoint adds a data point to haproxy.sessions.rate metric.
- func (mb *MetricsBuilder) RecordHaproxySessionsRateDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseFloat(inputVal, 64)
- if err != nil {
- return fmt.Errorf("failed to parse float64 for HaproxySessionsRate, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxySessionsRate.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // RecordHaproxySessionsTotalDataPoint adds a data point to haproxy.sessions.total metric.
- func (mb *MetricsBuilder) RecordHaproxySessionsTotalDataPoint(ts pcommon.Timestamp, inputVal string) error {
- val, err := strconv.ParseInt(inputVal, 10, 64)
- if err != nil {
- return fmt.Errorf("failed to parse int64 for HaproxySessionsTotal, value was %s: %w", inputVal, err)
- }
- mb.metricHaproxySessionsTotal.recordDataPoint(mb.startTime, ts, val)
- return nil
- }
- // Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
- // and the metrics builder should update its startTime and reset its internal state accordingly.
- func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
- mb.startTime = pcommon.NewTimestampFromTime(time.Now())
- for _, op := range options {
- op(mb)
- }
- }
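- // Illustrative sketch (editor's example): calling Reset when the scraper detects that the
- // monitored HAProxy instance restarted, passing the observed restart time so cumulative
- // metrics begin from a fresh start timestamp.
- func exampleHandleRestart(mb *MetricsBuilder, restartTime time.Time) {
-     mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(restartTime)))
- }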