generated_metrics.go

// Code generated by mdatagen. DO NOT EDIT.

package metadata

import (
	"time"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"
)
// AttributeOperation specifies the value of the operation attribute.
type AttributeOperation int

const (
	_ AttributeOperation = iota
	AttributeOperationRead
	AttributeOperationWrite
	AttributeOperationDelete
)

// String returns the string representation of the AttributeOperation.
func (av AttributeOperation) String() string {
	switch av {
	case AttributeOperationRead:
		return "read"
	case AttributeOperationWrite:
		return "write"
	case AttributeOperationDelete:
		return "delete"
	}
	return ""
}

// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
var MapAttributeOperation = map[string]AttributeOperation{
	"read":   AttributeOperationRead,
	"write":  AttributeOperationWrite,
	"delete": AttributeOperationDelete,
}
// AttributeRequest specifies the value of the request attribute.
type AttributeRequest int

const (
	_ AttributeRequest = iota
	AttributeRequestPut
	AttributeRequestGet
)

// String returns the string representation of the AttributeRequest.
func (av AttributeRequest) String() string {
	switch av {
	case AttributeRequestPut:
		return "put"
	case AttributeRequestGet:
		return "get"
	}
	return ""
}

// MapAttributeRequest is a helper map of string to AttributeRequest attribute value.
var MapAttributeRequest = map[string]AttributeRequest{
	"put": AttributeRequestPut,
	"get": AttributeRequestGet,
}
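
// Usage sketch (illustrative only, not part of the generated API): a scraper that
// reads raw request names out of Riak stats can validate them against
// MapAttributeRequest before recording, for example:
//
//	if req, ok := MapAttributeRequest[rawName]; ok {
//		mb.RecordRiakNodeOperationCountDataPoint(now, count, req)
//	}
//
// where rawName, count, now, and mb are assumed to come from the scraper.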
type metricRiakMemoryLimit struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills riak.memory.limit metric with initial data.
func (m *metricRiakMemoryLimit) init() {
	m.data.SetName("riak.memory.limit")
	m.data.SetDescription("The amount of memory allocated to the node.")
	m.data.SetUnit("By")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricRiakMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricRiakMemoryLimit) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricRiakMemoryLimit) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricRiakMemoryLimit(cfg MetricConfig) metricRiakMemoryLimit {
	m := metricRiakMemoryLimit{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
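
// Lifecycle sketch (illustrative only): each generated metric struct buffers data
// points between scrapes. recordDataPoint appends to the buffer (only when the
// metric is enabled), and emit moves the buffered metric into the caller's slice
// and re-initializes the buffer. Assuming MetricConfig exposes an Enabled field
// as in the companion generated_config.go:
//
//	m := newMetricRiakMemoryLimit(MetricConfig{Enabled: true})
//	m.recordDataPoint(start, ts, 1024) // start, ts are assumed pcommon.Timestamp values
//	m.emit(scopeMetrics.Metrics())     // scopeMetrics is an assumed pmetric.ScopeMetrics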
type metricRiakNodeOperationCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills riak.node.operation.count metric with initial data.
func (m *metricRiakNodeOperationCount) init() {
	m.data.SetName("riak.node.operation.count")
	m.data.SetDescription("The number of operations performed by the node.")
	m.data.SetUnit("{operation}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricRiakNodeOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, requestAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("request", requestAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricRiakNodeOperationCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricRiakNodeOperationCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricRiakNodeOperationCount(cfg MetricConfig) metricRiakNodeOperationCount {
	m := metricRiakNodeOperationCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
type metricRiakNodeOperationTimeMean struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills riak.node.operation.time.mean metric with initial data.
func (m *metricRiakNodeOperationTimeMean) init() {
	m.data.SetName("riak.node.operation.time.mean")
	m.data.SetDescription("The mean time between request and response for operations performed by the node over the last minute.")
	m.data.SetUnit("us")
	m.data.SetEmptyGauge()
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricRiakNodeOperationTimeMean) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, requestAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("request", requestAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricRiakNodeOperationTimeMean) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricRiakNodeOperationTimeMean) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricRiakNodeOperationTimeMean(cfg MetricConfig) metricRiakNodeOperationTimeMean {
	m := metricRiakNodeOperationTimeMean{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
type metricRiakNodeReadRepairCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills riak.node.read_repair.count metric with initial data.
func (m *metricRiakNodeReadRepairCount) init() {
	m.data.SetName("riak.node.read_repair.count")
	m.data.SetDescription("The number of read repairs performed by the node.")
	m.data.SetUnit("{read_repair}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}

func (m *metricRiakNodeReadRepairCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricRiakNodeReadRepairCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricRiakNodeReadRepairCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricRiakNodeReadRepairCount(cfg MetricConfig) metricRiakNodeReadRepairCount {
	m := metricRiakNodeReadRepairCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
type metricRiakVnodeIndexOperationCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills riak.vnode.index.operation.count metric with initial data.
func (m *metricRiakVnodeIndexOperationCount) init() {
	m.data.SetName("riak.vnode.index.operation.count")
	m.data.SetDescription("The number of index operations performed by vnodes on the node.")
	m.data.SetUnit("{operation}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricRiakVnodeIndexOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("operation", operationAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricRiakVnodeIndexOperationCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricRiakVnodeIndexOperationCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricRiakVnodeIndexOperationCount(cfg MetricConfig) metricRiakVnodeIndexOperationCount {
	m := metricRiakVnodeIndexOperationCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
type metricRiakVnodeOperationCount struct {
	data     pmetric.Metric // data buffer for generated metric.
	config   MetricConfig   // metric config provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills riak.vnode.operation.count metric with initial data.
func (m *metricRiakVnodeOperationCount) init() {
	m.data.SetName("riak.vnode.operation.count")
	m.data.SetDescription("The number of operations performed by vnodes on the node.")
	m.data.SetUnit("{operation}")
	m.data.SetEmptySum()
	m.data.Sum().SetIsMonotonic(true)
	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

func (m *metricRiakVnodeOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, requestAttributeValue string) {
	if !m.config.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("request", requestAttributeValue)
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricRiakVnodeOperationCount) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricRiakVnodeOperationCount) emit(metrics pmetric.MetricSlice) {
	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

func newMetricRiakVnodeOperationCount(cfg MetricConfig) metricRiakVnodeOperationCount {
	m := metricRiakVnodeOperationCount{config: cfg}
	if cfg.Enabled {
		m.data = pmetric.NewMetric()
		m.init()
	}
	return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in the metadata and user config.
type MetricsBuilder struct {
	config                             MetricsBuilderConfig // config of the metrics builder.
	startTime                          pcommon.Timestamp    // start time that will be applied to all recorded data points.
	metricsCapacity                    int                  // maximum observed number of metrics per resource.
	metricsBuffer                      pmetric.Metrics      // accumulates metrics data before emitting.
	buildInfo                          component.BuildInfo  // contains version information.
	metricRiakMemoryLimit              metricRiakMemoryLimit
	metricRiakNodeOperationCount       metricRiakNodeOperationCount
	metricRiakNodeOperationTimeMean    metricRiakNodeOperationTimeMean
	metricRiakNodeReadRepairCount      metricRiakNodeReadRepairCount
	metricRiakVnodeIndexOperationCount metricRiakVnodeIndexOperationCount
	metricRiakVnodeOperationCount      metricRiakVnodeOperationCount
}

// metricBuilderOption applies changes to the default metrics builder.
type metricBuilderOption func(*MetricsBuilder)

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
	return func(mb *MetricsBuilder) {
		mb.startTime = startTime
	}
}

func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		config:                             mbc,
		startTime:                          pcommon.NewTimestampFromTime(time.Now()),
		metricsBuffer:                      pmetric.NewMetrics(),
		buildInfo:                          settings.BuildInfo,
		metricRiakMemoryLimit:              newMetricRiakMemoryLimit(mbc.Metrics.RiakMemoryLimit),
		metricRiakNodeOperationCount:       newMetricRiakNodeOperationCount(mbc.Metrics.RiakNodeOperationCount),
		metricRiakNodeOperationTimeMean:    newMetricRiakNodeOperationTimeMean(mbc.Metrics.RiakNodeOperationTimeMean),
		metricRiakNodeReadRepairCount:      newMetricRiakNodeReadRepairCount(mbc.Metrics.RiakNodeReadRepairCount),
		metricRiakVnodeIndexOperationCount: newMetricRiakVnodeIndexOperationCount(mbc.Metrics.RiakVnodeIndexOperationCount),
		metricRiakVnodeOperationCount:      newMetricRiakVnodeOperationCount(mbc.Metrics.RiakVnodeOperationCount),
	}
	for _, op := range options {
		op(mb)
	}
	return mb
}
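
// Construction sketch (illustrative only): a receiver typically creates one
// MetricsBuilder per scraper. DefaultMetricsBuilderConfig and settings below are
// assumptions taken from the usual mdatagen companions, not definitions in this file:
//
//	mb := NewMetricsBuilder(DefaultMetricsBuilderConfig(), settings,
//		WithStartTime(pcommon.NewTimestampFromTime(time.Now())))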
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
	return NewResourceBuilder(mb.config.ResourceAttributes)
}

// updateCapacity updates the max length of metrics and resource attributes that will be used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
	if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
		mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
	}
}

// ResourceMetricsOption applies changes to the provided resource metrics.
type ResourceMetricsOption func(pmetric.ResourceMetrics)

// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
	return func(rm pmetric.ResourceMetrics) {
		res.CopyTo(rm.Resource())
	}
}

// WithStartTimeOverride overrides the start time for all the resource metrics data points.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
	return func(rm pmetric.ResourceMetrics) {
		var dps pmetric.NumberDataPointSlice
		metrics := rm.ScopeMetrics().At(0).Metrics()
		for i := 0; i < metrics.Len(); i++ {
			switch metrics.At(i).Type() {
			case pmetric.MetricTypeGauge:
				dps = metrics.At(i).Gauge().DataPoints()
			case pmetric.MetricTypeSum:
				dps = metrics.At(i).Sum().DataPoints()
			}
			for j := 0; j < dps.Len(); j++ {
				dps.At(j).SetStartTimestamp(start)
			}
		}
	}
}
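
// Usage sketch (illustrative only): when the scraped source restarts, a per-resource
// start time can be applied instead of the builder-wide one:
//
//	mb.EmitForResource(
//		WithResource(res),                  // res is an assumed pcommon.Resource
//		WithStartTimeOverride(restartedAt), // restartedAt is an assumed pcommon.Timestamp
//	)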
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise, calling `Emit` alone is sufficient.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
	rm := pmetric.NewResourceMetrics()
	ils := rm.ScopeMetrics().AppendEmpty()
	ils.Scope().SetName("otelcol/riakreceiver")
	ils.Scope().SetVersion(mb.buildInfo.Version)
	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
	mb.metricRiakMemoryLimit.emit(ils.Metrics())
	mb.metricRiakNodeOperationCount.emit(ils.Metrics())
	mb.metricRiakNodeOperationTimeMean.emit(ils.Metrics())
	mb.metricRiakNodeReadRepairCount.emit(ils.Metrics())
	mb.metricRiakVnodeIndexOperationCount.emit(ils.Metrics())
	mb.metricRiakVnodeOperationCount.emit(ils.Metrics())
	for _, op := range rmo {
		op(rm)
	}
	if ils.Metrics().Len() > 0 {
		mb.updateCapacity(rm)
		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
	}
}

// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics {
	mb.EmitForResource(rmo...)
	metrics := mb.metricsBuffer
	mb.metricsBuffer = pmetric.NewMetrics()
	return metrics
}
// RecordRiakMemoryLimitDataPoint adds a data point to riak.memory.limit metric.
func (mb *MetricsBuilder) RecordRiakMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricRiakMemoryLimit.recordDataPoint(mb.startTime, ts, val)
}

// RecordRiakNodeOperationCountDataPoint adds a data point to riak.node.operation.count metric.
func (mb *MetricsBuilder) RecordRiakNodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
	mb.metricRiakNodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}

// RecordRiakNodeOperationTimeMeanDataPoint adds a data point to riak.node.operation.time.mean metric.
func (mb *MetricsBuilder) RecordRiakNodeOperationTimeMeanDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
	mb.metricRiakNodeOperationTimeMean.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}

// RecordRiakNodeReadRepairCountDataPoint adds a data point to riak.node.read_repair.count metric.
func (mb *MetricsBuilder) RecordRiakNodeReadRepairCountDataPoint(ts pcommon.Timestamp, val int64) {
	mb.metricRiakNodeReadRepairCount.recordDataPoint(mb.startTime, ts, val)
}

// RecordRiakVnodeIndexOperationCountDataPoint adds a data point to riak.vnode.index.operation.count metric.
func (mb *MetricsBuilder) RecordRiakVnodeIndexOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) {
	mb.metricRiakVnodeIndexOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String())
}

// RecordRiakVnodeOperationCountDataPoint adds a data point to riak.vnode.operation.count metric.
func (mb *MetricsBuilder) RecordRiakVnodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue AttributeRequest) {
	mb.metricRiakVnodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue.String())
}
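
// Scrape-cycle sketch (illustrative only): a scraper records the data points it
// collected and then emits them under a resource. The values and the
// SetRiakNodeName call below are assumptions (the latter from the companion
// generated_resource.go), not guarantees of this file:
//
//	now := pcommon.NewTimestampFromTime(time.Now())
//	mb.RecordRiakMemoryLimitDataPoint(now, 512*1024*1024)
//	mb.RecordRiakNodeOperationCountDataPoint(now, 42, AttributeRequestGet)
//	rb := mb.NewResourceBuilder()
//	rb.SetRiakNodeName("riak@127.0.0.1")
//	metrics := mb.Emit(WithResource(rb.Emit()))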
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// and the metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
	for _, op := range options {
		op(mb)
	}
}