summary_test.go

package detailed_test

import (
	"context"
	"sort"
	"testing"
	"time"

	"github.com/weaveworks/common/mtime"
	"github.com/weaveworks/common/test"
	"github.com/weaveworks/scope/probe/docker"
	"github.com/weaveworks/scope/probe/process"
	"github.com/weaveworks/scope/render"
	"github.com/weaveworks/scope/render/detailed"
	"github.com/weaveworks/scope/render/expected"
	"github.com/weaveworks/scope/report"
	"github.com/weaveworks/scope/test/fixture"
	"github.com/weaveworks/scope/test/reflect"
)
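
// TestSummaries checks that detailed.Summaries produces a summary for every
// rendered process node, and that a node's metrics are summarized down to a
// single current value (samples dropped, min/max preserved).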
func TestSummaries(t *testing.T) {
	{
		// Just a convenient source of some rendered nodes
		have := detailed.Summaries(context.Background(), detailed.RenderContext{Report: fixture.Report}, render.ProcessRenderer.Render(context.Background(), fixture.Report).Nodes)

		// The ids of the processes rendered above
		expectedIDs := []string{
			fixture.ClientProcess1NodeID,
			fixture.ClientProcess2NodeID,
			fixture.ServerProcessNodeID,
			fixture.NonContainerProcessNodeID,
			render.IncomingInternetID,
			render.OutgoingInternetID,
		}
		sort.Strings(expectedIDs)

		// It should summarize each node
		ids := []string{}
		for id := range have {
			ids = append(ids, id)
		}
		sort.Strings(ids)
		if !reflect.DeepEqual(expectedIDs, ids) {
			t.Fatalf("Expected Summaries to have summarized every node in the process renderer: %v, but got %v", expectedIDs, ids)
		}
	}

	// It should summarize nodes' metrics
	{
		t1, t2 := mtime.Now().Add(-1*time.Minute), mtime.Now()
		metric := report.MakeMetric([]report.Sample{{Timestamp: t1, Value: 1}, {Timestamp: t2, Value: 2}})
		input := fixture.Report.Copy()

		processNode := input.Process.Nodes[fixture.ClientProcess1NodeID]
		processNode.Metrics = processNode.Metrics.Copy()
		processNode.Metrics[process.CPUUsage] = metric
		input.Process.Nodes[fixture.ClientProcess1NodeID] = processNode

		have := detailed.Summaries(context.Background(), detailed.RenderContext{Report: input}, render.ProcessRenderer.Render(context.Background(), input).Nodes)

		node, ok := have[fixture.ClientProcess1NodeID]
		if !ok {
			t.Fatalf("Expected output to have the node we added the metric to")
		}

		var row report.MetricRow
		ok = false
		for _, metric := range node.Metrics {
			if metric.ID == process.CPUUsage {
				row = metric
				ok = true
				break
			}
		}
		if !ok {
			t.Fatalf("Expected node to have the metric we added")
		}

		// Our summarized MetricRow
		want := report.MetricRow{
			ID:       process.CPUUsage,
			Label:    "CPU",
			Format:   "percent",
			Value:    2,
			Priority: 1,
			Metric: &report.Metric{
				Samples: nil,
				Min:     metric.Min,
				Max:     metric.Max,
			},
		}

		if !reflect.DeepEqual(want, row) {
			t.Fatalf("Expected to have summarized the node's metrics: %s", test.Diff(want, row))
		}
	}
}
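
// TestMakeNodeSummary checks the basic summary fields (ID, label, rank, shape,
// metadata and adjacency) produced for process, container, container image,
// host and group nodes from the test fixture.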
func TestMakeNodeSummary(t *testing.T) {
	testcases := []struct {
		name  string
		input report.Node
		ok    bool
		want  detailed.NodeSummary
	}{
		{
			name:  "single process rendering",
			input: expected.RenderedProcesses[fixture.ClientProcess1NodeID],
			ok:    true,
			want: detailed.NodeSummary{
				BasicNodeSummary: detailed.BasicNodeSummary{
					ID:         fixture.ClientProcess1NodeID,
					Label:      fixture.Client1Name,
					LabelMinor: "client.hostname.com (10001)",
					Rank:       fixture.Client1Name,
					Shape:      "square",
					Tag:        "",
				},
				Metadata: []report.MetadataRow{
					{ID: process.PID, Label: "PID", Value: fixture.Client1PID, Priority: 1, Datatype: report.Number},
				},
				Adjacency: report.MakeIDList(fixture.ServerProcessNodeID),
			},
		},
		{
			name:  "single container rendering",
			input: expected.RenderedContainers[fixture.ClientContainerNodeID],
			ok:    true,
			want: detailed.NodeSummary{
				BasicNodeSummary: detailed.BasicNodeSummary{
					ID:         fixture.ClientContainerNodeID,
					Label:      fixture.ClientContainerName,
					LabelMinor: fixture.ClientHostName,
					Rank:       fixture.ClientContainerImageName,
					Shape:      "hexagon",
					Tag:        "",
				},
				Metadata: []report.MetadataRow{
					{ID: docker.ImageName, Label: "Image name", Value: fixture.ClientContainerImageName, Priority: 2},
					{ID: docker.ContainerID, Label: "ID", Value: fixture.ClientContainerID, Priority: 11, Truncate: 12},
				},
				Adjacency: report.MakeIDList(fixture.ServerContainerNodeID),
			},
		},
		{
			name:  "single container image rendering",
			input: expected.RenderedContainerImages[expected.ClientContainerImageNodeID],
			ok:    true,
			want: detailed.NodeSummary{
				BasicNodeSummary: detailed.BasicNodeSummary{
					ID:         expected.ClientContainerImageNodeID,
					Label:      fixture.ClientContainerImageName,
					LabelMinor: "1 container",
					Rank:       fixture.ClientContainerImageName,
					Shape:      "hexagon",
					Tag:        "",
					Stack:      true,
				},
				Metadata: []report.MetadataRow{
					{ID: report.Container, Label: "# Containers", Value: "1", Priority: 2, Datatype: report.Number},
				},
				Adjacency: report.MakeIDList(expected.ServerContainerImageNodeID),
			},
		},
		{
			name:  "single host rendering",
			input: expected.RenderedHosts[fixture.ClientHostNodeID],
			ok:    true,
			want: detailed.NodeSummary{
				BasicNodeSummary: detailed.BasicNodeSummary{
					ID:         fixture.ClientHostNodeID,
					Label:      "client",
					LabelMinor: "hostname.com",
					Rank:       "hostname.com",
					Shape:      "circle",
					Tag:        "",
				},
				Metadata: []report.MetadataRow{
					{ID: report.HostName, Label: "Hostname", Value: fixture.ClientHostName, Priority: 11},
				},
				Adjacency: report.MakeIDList(fixture.ServerHostNodeID),
			},
		},
		{
			name:  "group node rendering",
			input: expected.RenderedProcessNames[fixture.ServerName],
			ok:    true,
			want: detailed.NodeSummary{
				BasicNodeSummary: detailed.BasicNodeSummary{
					ID:         "apache",
					Label:      "apache",
					LabelMinor: "1 process",
					Rank:       "apache",
					Shape:      "square",
					Tag:        "",
					Stack:      true,
				},
			},
		},
	}
	for _, testcase := range testcases {
		have, ok := detailed.MakeNodeSummary(detailed.RenderContext{Report: fixture.Report}, testcase.input)
		if ok != testcase.ok {
			t.Errorf("%s: MakeNodeSummary failed: expected ok value to be: %v", testcase.name, testcase.ok)
			continue
		}
		if !reflect.DeepEqual(testcase.want, have) {
			t.Errorf("%s: Node Summary did not match: %s", testcase.name, test.Diff(testcase.want, have))
		}
	}
}
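
// TestMakeNodeSummaryNoMetadata checks that MakeNodeSummary still produces a
// human-readable label for every supported topology when a node carries no
// metadata beyond its ID.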
func TestMakeNodeSummaryNoMetadata(t *testing.T) {
	processNameTopology := render.MakeGroupNodeTopology(report.Process, process.Name)
	for topology, id := range map[string]string{
		render.Pseudo:         render.MakePseudoNodeID("id"),
		report.Process:        report.MakeProcessNodeID("ip-123-45-6-100", "1234"),
		report.Container:      report.MakeContainerNodeID("0001accbecc2c95e650fe641926fb923b7cc307a71101a1200af3759227b6d7d"),
		report.ContainerImage: report.MakeContainerImageNodeID("0001accbecc2c95e650fe641926fb923b7cc307a71101a1200af3759227b6d7d"),
		report.Pod:            report.MakePodNodeID("005e2999-d429-11e7-8535-0a41257e78e8"),
		report.Service:        report.MakeServiceNodeID("005e2999-d429-11e7-8535-0a41257e78e8"),
		report.Deployment:     report.MakeDeploymentNodeID("005e2999-d429-11e7-8535-0a41257e78e8"),
		report.DaemonSet:      report.MakeDaemonSetNodeID("005e2999-d429-11e7-8535-0a41257e78e8"),
		report.StatefulSet:    report.MakeStatefulSetNodeID("005e2999-d429-11e7-8535-0a41257e78e8"),
		report.CronJob:        report.MakeCronJobNodeID("005e2999-d429-11e7-8535-0a41257e78e8"),
		report.ECSTask:        report.MakeECSTaskNodeID("arn:aws:ecs:us-east-1:012345678910:task/1dc5c17a-422b-4dc4-b493-371970c6c4d6"),
		report.ECSService:     report.MakeECSServiceNodeID("cluster", "service"),
		report.SwarmService:   report.MakeSwarmServiceNodeID("0001accbecc2c95e650fe641926fb923b7cc307a71101a1200af3759227b6d7d"),
		report.Host:           report.MakeHostNodeID("ip-123-45-6-100"),
		report.Overlay:        report.MakeOverlayNodeID("", "3e:ca:14:ca:12:5c"),
		processNameTopology:   "/home/weave/scope",
	} {
		summary, b := detailed.MakeNodeSummary(detailed.RenderContext{}, report.MakeNode(id).WithTopology(topology))
		switch {
		case !b:
			t.Errorf("Node Summary missing for topology %s, id %s", topology, id)
		case summary.Label == "":
			t.Errorf("Node Summary Label missing for topology %s, id %s", topology, id)
		case summary.Label == id && topology != processNameTopology:
			t.Errorf("Node Summary Label same as id (that's cheating!) for topology %s, id %s", topology, id)
		}
	}
}
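
// TestNodeMetadata checks which metadata rows are reported for a container
// node, and that nodes in an unknown topology yield no metadata.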
func TestNodeMetadata(t *testing.T) {
	inputs := []struct {
		name string
		node report.Node
		want []report.MetadataRow
	}{
		{
			name: "container",
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID:            fixture.ClientContainerID,
				docker.LabelPrefix + "label1": "label1value",
				docker.ContainerStateHuman:    report.StateRunning,
			}).WithTopology(report.Container).WithSets(report.MakeSets().
				Add(docker.ContainerIPs, report.MakeStringSet("10.10.10.0/24", "10.10.10.1/24")),
			),
			want: []report.MetadataRow{
				{ID: docker.ContainerStateHuman, Label: "State", Value: "running", Priority: 4},
				{ID: docker.ContainerIPs, Label: "IPs", Value: "10.10.10.0/24, 10.10.10.1/24", Priority: 8},
				{ID: docker.ContainerID, Label: "ID", Value: fixture.ClientContainerID, Priority: 11, Truncate: 12},
			},
		},
		{
			name: "unknown topology",
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID: fixture.ClientContainerID,
			}).WithTopology("foobar"),
			want: nil,
		},
	}
	for _, input := range inputs {
		summary, _ := detailed.MakeNodeSummary(detailed.RenderContext{Report: fixture.Report}, input.node)
		have := summary.Metadata
		if !reflect.DeepEqual(input.want, have) {
			t.Errorf("%s: %s", input.name, test.Diff(input.want, have))
		}
	}
}
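
// TestNodeMetrics checks the metric rows reported for process, container and
// host nodes, and that nodes in an unknown topology yield no metrics.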
func TestNodeMetrics(t *testing.T) {
	inputs := []struct {
		name string
		node report.Node
		want []report.MetricRow
	}{
		{
			name: "process",
			node: fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID],
			want: []report.MetricRow{
				{
					ID:       process.CPUUsage,
					Label:    "CPU",
					Format:   "percent",
					Group:    "",
					Value:    0.01,
					Priority: 1,
					Metric:   &fixture.ClientProcess1CPUMetric,
				},
				{
					ID:       process.MemoryUsage,
					Label:    "Memory",
					Format:   "filesize",
					Group:    "",
					Value:    0.02,
					Priority: 2,
					Metric:   &fixture.ClientProcess1MemoryMetric,
				},
			},
		},
		{
			name: "container",
			node: fixture.Report.Container.Nodes[fixture.ClientContainerNodeID],
			want: []report.MetricRow{
				{
					ID:       docker.CPUTotalUsage,
					Label:    "CPU",
					Format:   "percent",
					Group:    "",
					Value:    0.03,
					Priority: 1,
					Metric:   &fixture.ClientContainerCPUMetric,
				},
				{
					ID:       docker.MemoryUsage,
					Label:    "Memory",
					Format:   "filesize",
					Group:    "",
					Value:    0.04,
					Priority: 2,
					Metric:   &fixture.ClientContainerMemoryMetric,
				},
			},
		},
		{
			name: "host",
			node: fixture.Report.Host.Nodes[fixture.ClientHostNodeID],
			want: []report.MetricRow{
				{
					ID:       report.HostCPUUsage,
					Label:    "CPU",
					Format:   "percent",
					Group:    "",
					Value:    0.07,
					Priority: 1,
					Metric:   &fixture.ClientHostCPUMetric,
				},
				{
					ID:       report.HostMemoryUsage,
					Label:    "Memory",
					Format:   "filesize",
					Group:    "",
					Value:    0.08,
					Priority: 2,
					Metric:   &fixture.ClientHostMemoryMetric,
				},
				{
					ID:       report.Load1,
					Label:    "Load (1m)",
					Group:    "load",
					Value:    0.09,
					Priority: 11,
					Metric:   &fixture.ClientHostLoad1Metric,
				},
			},
		},
		{
			name: "unknown topology",
			node: report.MakeNode(fixture.ClientContainerNodeID).WithTopology("foobar"),
			want: nil,
		},
	}
	for _, input := range inputs {
		summary, _ := detailed.MakeNodeSummary(detailed.RenderContext{Report: fixture.Report}, input.node)
		have := summary.Metrics
		if !reflect.DeepEqual(input.want, have) {
			t.Errorf("%s: %s", input.name, test.Diff(input.want, have))
		}
	}
}
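
// TestMetricRowSummary checks that MetricRow.Summary drops the samples from
// the summarized copy while leaving the original row and all other fields
// intact.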
func TestMetricRowSummary(t *testing.T) {
	var (
		now    = time.Now()
		metric = report.MakeSingletonMetric(now, 1.234)
		row    = report.MetricRow{
			ID:       "id",
			Format:   "format",
			Group:    "group",
			Value:    1.234,
			Priority: 1,
			Metric:   &metric,
		}
		summary = row.Summary()
	)
	// summary should not have any samples
	if summary.Metric.Len() != 0 {
		t.Errorf("Expected summary to have no samples, but had %d", summary.Metric.Len())
	}
	// original metric should still have its samples
	if metric.Len() != 1 {
		t.Errorf("Expected original metric to still have its samples, but had %d", metric.Len())
	}
	// summary should have all the same fields (minus the metric)
	summary.Metric = nil
	row.Metric = nil
	if !reflect.DeepEqual(summary, row) {
		t.Errorf("Expected summary to have same fields as original: %s", test.Diff(summary, row))
	}
}
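
// TestNodeTables checks the tables (environment variables, Docker labels,
// image details) generated from a container topology's table templates, and
// that nodes in an unknown topology yield no tables.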
func TestNodeTables(t *testing.T) {
	inputs := []struct {
		name string
		rpt  report.Report
		node report.Node
		want []report.Table
	}{
		{
			name: "container",
			rpt: report.Report{
				Container: report.MakeTopology().
					WithTableTemplates(docker.ContainerTableTemplates),
			},
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID:            fixture.ClientContainerID,
				docker.LabelPrefix + "label1": "label1value",
				docker.ContainerState:         report.StateRunning,
			}).WithTopology(report.Container).WithSets(report.MakeSets().
				Add(docker.ContainerIPs, report.MakeStringSet("10.10.10.0/24", "10.10.10.1/24")),
			),
			want: []report.Table{
				{
					ID:    docker.EnvPrefix,
					Type:  report.PropertyListType,
					Label: "Environment variables",
					Rows:  []report.Row{},
				},
				{
					ID:    docker.LabelPrefix,
					Type:  report.PropertyListType,
					Label: "Docker labels",
					Rows: []report.Row{
						{
							ID: "label_label1",
							Entries: map[string]string{
								"label": "label1",
								"value": "label1value",
							},
						},
					},
				},
				{
					ID:    docker.ImageTableID,
					Type:  report.PropertyListType,
					Label: "Image",
					Rows:  []report.Row{},
				},
			},
		},
		{
			name: "unknown topology",
			rpt:  report.MakeReport(),
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID: fixture.ClientContainerID,
			}).WithTopology("foobar"),
			want: nil,
		},
	}
	for _, input := range inputs {
		summary, _ := detailed.MakeNodeSummary(detailed.RenderContext{Report: input.rpt}, input.node)
		have := summary.Tables
		if !reflect.DeepEqual(input.want, have) {
			t.Errorf("%s: %s", input.name, test.Diff(input.want, have))
		}
	}
}