feat: add intel_pmu plugin (#9724)
Co-authored-by: ktrojan <kuba.trojan@intel.com>
parent b89ef94777
commit 9480e49eee
@@ -132,6 +132,7 @@ following works:
 - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
 - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE)
 - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE)
+- github.com/intel/iaevents [Apache License 2.0](https://github.com/intel/iaevents/blob/main/LICENSE)
 - github.com/jackc/chunkreader [MIT License](https://github.com/jackc/chunkreader/blob/master/LICENSE)
 - github.com/jackc/pgconn [MIT License](https://github.com/jackc/pgconn/blob/master/LICENSE)
 - github.com/jackc/pgio [MIT License](https://github.com/jackc/pgio/blob/master/LICENSE)
go.mod

@@ -151,6 +151,7 @@ require (
 	github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7
 	github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
 	github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
+	github.com/intel/iaevents v1.0.0
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.5.0 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
go.sum

@@ -1249,6 +1249,8 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP
 github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q=
 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI=
+github.com/intel/iaevents v1.0.0 h1:J8lETV13FMImV0VbOrKhkA790z7+cAHQ/28gbiefu7E=
+github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y=
 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
 github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
 github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
@@ -71,6 +71,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener"
+	_ "github.com/influxdata/telegraf/plugins/inputs/intel_pmu"
 	_ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat"
 	_ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt"
 	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
@ -0,0 +1,210 @@
|
||||||
|
# Intel Performance Monitoring Unit Plugin
|
||||||
|
|
||||||
|
This input plugin exposes Intel PMU (Performance Monitoring Unit) metrics available through [Linux Perf](https://perf.wiki.kernel.org/index.php/Main_Page) subsystem.
|
||||||
|
|
||||||
|
PMU metrics gives insight into performance and health of IA processor's internal components,
|
||||||
|
including core and uncore units. With the number of cores increasing and processor topology getting more complex
|
||||||
|
the insight into those metrics is vital to assure the best CPU performance and utilization.
|
||||||
|
|
||||||
|
Performance counters are CPU hardware registers that count hardware events such as instructions executed, cache-misses suffered, or branches mispredicted.
|
||||||
|
They form a basis for profiling applications to trace dynamic control flow and identify hotspots.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem
|
||||||
|
[[inputs.intel_pmu]]
|
||||||
|
## List of filesystem locations of JSON files that contain PMU event definitions.
|
||||||
|
event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"]
|
||||||
|
|
||||||
|
## List of core events measurement entities. There can be more than one core_events sections.
|
||||||
|
[[inputs.intel_pmu.core_events]]
|
||||||
|
## List of events to be counted. Event names shall match names from event_definitions files.
|
||||||
|
## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
|
||||||
|
## If absent, all core events from provided event_definitions are counted skipping unresolvable ones.
|
||||||
|
events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"]
|
||||||
|
|
||||||
|
## Limits the counting of events to core numbers specified.
|
||||||
|
## If absent, events are counted on all cores.
|
||||||
|
## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element.
|
||||||
|
## example: cores = ["0,2", "4", "12-16"]
|
||||||
|
cores = ["0"]
|
||||||
|
|
||||||
|
## Indicator that plugin shall attempt to run core_events.events as a single perf group.
|
||||||
|
## If absent or set to false, each event is counted individually. Defaults to false.
|
||||||
|
## This limits the number of events that can be measured to a maximum of available hardware counters per core.
|
||||||
|
## Could vary depending on type of event, use of fixed counters.
|
||||||
|
# perf_group = false
|
||||||
|
|
||||||
|
## Optionally set a custom tag value that will be added to every measurement within this events group.
|
||||||
|
## Can be applied to any group of events, unrelated to perf_group setting.
|
||||||
|
# events_tag = ""
|
||||||
|
|
||||||
|
## List of uncore event measurement entities. There can be more than one uncore_events sections.
|
||||||
|
[[inputs.intel_pmu.uncore_events]]
|
||||||
|
## List of events to be counted. Event names shall match names from event_definitions files.
|
||||||
|
## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
|
||||||
|
## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones.
|
||||||
|
events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"]
|
||||||
|
|
||||||
|
## Limits the counting of events to specified sockets.
|
||||||
|
## If absent, events are counted on all sockets.
|
||||||
|
## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element.
|
||||||
|
## example: sockets = ["0-2"]
|
||||||
|
sockets = ["0"]
|
||||||
|
|
||||||
|
## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore.
|
||||||
|
## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false.
|
||||||
|
# aggregate_uncore_units = false
|
||||||
|
|
||||||
|
## Optionally set a custom tag value that will be added to every measurement within this events group.
|
||||||
|
# events_tag = ""
|
||||||
|
```
|
||||||
|
|
||||||
|
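For reference, the `cores` and `sockets` notation used in the sample above expands into individual IDs as in the following standalone Go sketch. It is an illustration of the "n", "n,m" and "n-m" forms only, not the plugin's own parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandIDs expands one array element such as "0,2", "4" or "12-16"
// into the individual core/socket IDs it denotes.
func expandIDs(element string) ([]int, error) {
	var ids []int
	for _, part := range strings.Split(element, ",") {
		part = strings.TrimSpace(part)
		if bounds := strings.SplitN(part, "-", 2); len(bounds) == 2 {
			start, err1 := strconv.Atoi(bounds[0])
			end, err2 := strconv.Atoi(bounds[1])
			if err1 != nil || err2 != nil || start >= end {
				return nil, fmt.Errorf("invalid range %q", part)
			}
			for id := start; id <= end; id++ {
				ids = append(ids, id)
			}
			continue
		}
		id, err := strconv.Atoi(part)
		if err != nil {
			return nil, fmt.Errorf("invalid id %q", part)
		}
		ids = append(ids, id)
	}
	return ids, nil
}

func main() {
	ids, _ := expandIDs("12-16")
	fmt.Println(ids) // [12 13 14 15 16]
}
```
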
### Modifiers

Perf modifiers adjust event-specific perf attributes to fulfill particular requirements.
Details about the perf attribute structure can be found in the [perf_event_open](https://man7.org/linux/man-pages/man2/perf_event_open.2.html) syscall manual.

General schema of a configuration's `events` list element:

```regexp
EVENT_NAME(:(config|config1|config2)=(0x[0-9a-f]{1,16})(p|k|u|h|H|I|G|D))*
```

where:

| Modifier | Underlying attribute            | Description                  |
|----------|---------------------------------|------------------------------|
| config   | perf_event_attr.config          | type-specific configuration  |
| config1  | perf_event_attr.config1         | extension of config          |
| config2  | perf_event_attr.config2         | extension of config1         |
| p        | perf_event_attr.precise_ip      | skid constraint              |
| k        | perf_event_attr.exclude_user    | don't count user             |
| u        | perf_event_attr.exclude_kernel  | don't count kernel           |
| h / H    | perf_event_attr.exclude_guest   | don't count in guest         |
| I        | perf_event_attr.exclude_idle    | don't count when idle        |
| G        | perf_event_attr.exclude_hv      | don't count hypervisor       |
| D        | perf_event_attr.pinned          | must always be on PMU        |

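As an illustration of the schema, the following minimal sketch (not plugin code) splits one `events` entry into the event name and its qualifier list; the plugin performs an equivalent split while parsing the configuration:

```go
package main

import (
	"fmt"
	"strings"
)

// splitEventEntry separates EVENT_NAME from the colon-delimited
// config options and perf modifiers that may follow it.
func splitEventEntry(entry string) (name string, qualifiers []string) {
	parts := strings.Split(entry, ":")
	return parts[0], parts[1:]
}

func main() {
	name, quals := splitEventEntry("CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k")
	fmt.Println(name, quals) // CPU_CLK_UNHALTED.THREAD_ANY [config1=0x4043200000000k]
}
```
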
## Requirements

The plugin uses the [iaevents](https://github.com/intel/iaevents) library, a Go package that makes accessing the Linux kernel's perf interface easier.

The Intel PMU plugin is intended for use on **Linux 64-bit** systems only.

Event definition JSON files for specific architectures can be found at [01.org](https://download.01.org/perfmon/).
A script that downloads the event definitions appropriate for your system (`event_download.py`) is available at [pmu-tools](https://github.com/andikleen/pmu-tools).
Please keep these files in a safe place on your system.

## Measuring

The plugin can measure both core and uncore events. During plugin initialization the event names provided by the user are compared
with the event definitions included in the JSON files and translated to perf attributes. Next, those events are activated to start counting.
During every Telegraf interval, the plugin reads the proper measurement for each previously activated event.

Each single core event may be counted separately on every available CPU core. In contrast, uncore events can be placed in
many PMUs within a specified CPU package. The plugin allows choosing core IDs (core events) or socket IDs (uncore events) on which the counting should be executed.
Uncore events are separately activated on all of a socket's PMUs, and can be exposed as separate
measurements or summed up into one measurement.

Obtained measurements are stored as three values: **Raw**, **Enabled** and **Running**. Raw is the total count of the event. Enabled and running are the total times the event was enabled and actually running.
Normally these are the same. If more events are started than there are available counter slots on the PMU, multiplexing
occurs and events run only part of the time. Therefore, the plugin provides a fourth value called **scaled**, which is calculated using the following formula:
`scaled = raw * enabled / running`.

Events are measured for all running processes.

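A minimal sketch of that scaling step is shown below; it is a standalone illustration, not code taken from the plugin:

```go
package main

import "fmt"

// scaled extrapolates the raw count to the full enabled time when the
// event ran only part of the time (running < enabled) due to multiplexing.
func scaled(raw, enabled, running uint64) uint64 {
	if running == 0 {
		return 0 // event never ran; nothing to extrapolate
	}
	return uint64(float64(raw) * float64(enabled) / float64(running))
}

func main() {
	// Values taken from the "Time multiplexing" example later in this README.
	fmt.Println(scaled(2947727, 2201071844, 1464935978)) // 4428970
}
```
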
### Core event groups

Perf allows assembling events as a group. A perf event group is scheduled onto the CPU as a unit: it will be put onto the CPU only if all of the events in the group can be put onto the CPU.
This means that the values of the member events can be meaningfully compared — added, divided (to get ratios), and so on — with each other,
since they have counted events for the same set of executed instructions [(source)](https://man7.org/linux/man-pages/man2/perf_event_open.2.html).

> **NOTE:**
> Be aware that the plugin will throw an error when trying to create a core event group of a size that exceeds the available core PMU counters.
> The error message from the perf syscall will be shown as "invalid argument". If you want to check how many PMUs are supported by your Intel CPU, you can use the [cpuid](https://linux.die.net/man/1/cpuid) command.

### Note about file descriptors

The plugin opens a number of file descriptors that depends on the number of monitored CPUs and the number of monitored
counters. It can easily exceed the default per-process limit of allowed file descriptors. Depending on the
configuration, it might be required to increase the limit of open file descriptors allowed.
This can be done, for example, by using the `ulimit -n` command.

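As a rough back-of-the-envelope estimate only (assuming roughly one descriptor per activated core event per monitored CPU, and one per event-capable uncore PMU unit per socket per uncore event, which is an assumption and not an exact formula from the plugin), the required limit can be approximated like this:

```go
package main

import "fmt"

// estimateFDs gives a coarse lower bound on file descriptors the plugin
// may need; the assumptions are stated in the surrounding text.
func estimateFDs(coreEvents, monitoredCPUs, uncoreEvents, sockets, unitsPerSocket int) int {
	return coreEvents*monitoredCPUs + uncoreEvents*sockets*unitsPerSocket
}

func main() {
	// e.g. 300 core events on 96 CPUs plus 20 uncore events on 2 sockets with
	// ~30 event-capable units each: far above the common default of 1024.
	fmt.Println(estimateFDs(300, 96, 20, 2, 30))
}
```
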
## Metrics

On each Telegraf interval, the Intel PMU plugin transmits the following data:

### Metric Fields

| Field   | Type   | Description |
|---------|--------|-------------|
| enabled | uint64 | time counter, contains time the associated perf event was enabled |
| running | uint64 | time counter, contains time the event was actually counted |
| raw     | uint64 | value counter, contains event count value during the time the event was actually counted |
| scaled  | uint64 | value counter, contains approximated value of counter if the event was continuously counted, using scaled = raw * (enabled / running) formula |

### Metric Tags - common

| Tag   | Description                  |
|-------|------------------------------|
| host  | hostname as read by Telegraf |
| event | name of the event            |

### Metric Tags - core events

| Tag        | Description |
|------------|-------------|
| cpu        | CPU ID as identified by the Linux OS (either logical CPU ID when HT is on or physical CPU ID when HT is off) |
| events_tag | (optional) tag as defined in the "intel_pmu.core_events" configuration element |

### Metric Tags - uncore events

| Tag        | Description |
|------------|-------------|
| socket     | socket number as identified by the Linux OS (physical_package_id) |
| unit_type  | category of event-capable PMU that the event was counted for, e.g. cbox for uncore_cbox_1, r2pcie for uncore_r2pcie etc. |
| unit       | name of the event-capable PMU that the event was counted for, as listed in /sys/bus/event_source/devices/, e.g. uncore_cbox_1, uncore_imc_1 etc. Present for non-aggregated uncore events only |
| events_tag | (optional) tag as defined in the "intel_pmu.uncore_events" configuration element |

## Example outputs

Event group:

```text
pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871237051i,running=2871237051i,raw=1171711i,scaled=1171711i 1621254096000000000
pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871240713i,running=2871240713i,raw=72340716i,scaled=72340716i 1621254096000000000
pmu_metric,cpu=1,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871118275i,running=2871118275i,raw=1646752i,scaled=1646752i 1621254096000000000
pmu_metric,cpu=1,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz raw=108802421i,scaled=108802421i,enabled=2871120107i,running=2871120107i 1621254096000000000
pmu_metric,cpu=2,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871143950i,running=2871143950i,raw=1316834i,scaled=1316834i 1621254096000000000
pmu_metric,cpu=2,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871074681i,running=2871074681i,raw=68728436i,scaled=68728436i 1621254096000000000
```

Uncore event not aggregated:

```text
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_0,unit_type=cbox enabled=2870630747i,running=2870630747i,raw=183996i,scaled=183996i 1621254096000000000
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_1,unit_type=cbox enabled=2870608194i,running=2870608194i,raw=185703i,scaled=185703i 1621254096000000000
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_2,unit_type=cbox enabled=2870600211i,running=2870600211i,raw=187331i,scaled=187331i 1621254096000000000
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_3,unit_type=cbox enabled=2870593914i,running=2870593914i,raw=184228i,scaled=184228i 1621254096000000000
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_4,unit_type=cbox scaled=195355i,enabled=2870558952i,running=2870558952i,raw=195355i 1621254096000000000
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_5,unit_type=cbox enabled=2870554131i,running=2870554131i,raw=197756i,scaled=197756i 1621254096000000000
```

Uncore event aggregated:

```text
pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit_type=cbox enabled=13199712335i,running=13199712335i,raw=467485i,scaled=467485i 1621254412000000000
```

Time multiplexing:

```text
pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,host=xyz raw=2947727i,scaled=4428970i,enabled=2201071844i,running=1464935978i 1621254412000000000
pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,host=xyz running=1465155618i,raw=302553190i,scaled=454511623i,enabled=2201035323i 1621254412000000000
pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK,host=xyz enabled=2200994057i,running=1466812391i,raw=3177535i,scaled=4767982i 1621254412000000000
pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK_ANY,host=xyz enabled=2200963921i,running=1470523496i,raw=3359272i,scaled=5027894i 1621254412000000000
pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES_ANY,host=xyz enabled=2200933946i,running=1470322480i,raw=23631950i,scaled=35374798i 1621254412000000000
pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES,host=xyz raw=18767833i,scaled=28169827i,enabled=2200888514i,running=1466317384i 1621254412000000000
```

@@ -0,0 +1,205 @@

//go:build linux && amd64
// +build linux,amd64

package intel_pmu

import (
	"errors"
	"fmt"

	ia "github.com/intel/iaevents"
)

type placementMaker interface {
	makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error)
	makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error)
}

type iaPlacementMaker struct{}

func (iaPlacementMaker) makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) {
	var err error
	var corePlacements []ia.PlacementProvider

	switch len(cores) {
	case 0:
		return nil, errors.New("no cores provided")
	case 1:
		corePlacements, err = ia.NewCorePlacements(factory, cores[0])
		if err != nil {
			return nil, err
		}
	default:
		corePlacements, err = ia.NewCorePlacements(factory, cores[0], cores[1:]...)
		if err != nil {
			return nil, err
		}
	}
	return corePlacements, nil
}

func (iaPlacementMaker) makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) {
	return ia.NewUncoreAllPlacements(factory, socket)
}

type eventsActivator interface {
	activateEvent(ia.Activator, ia.PlacementProvider, ia.Options) (*ia.ActiveEvent, error)
	activateGroup(ia.PlacementProvider, []ia.CustomizableEvent) (*ia.ActiveEventGroup, error)
	activateMulti(ia.MultiActivator, []ia.PlacementProvider, ia.Options) (*ia.ActiveMultiEvent, error)
}

type iaEventsActivator struct{}

func (iaEventsActivator) activateEvent(a ia.Activator, p ia.PlacementProvider, o ia.Options) (*ia.ActiveEvent, error) {
	return a.Activate(p, ia.NewEventTargetProcess(-1, 0), o)
}

func (iaEventsActivator) activateGroup(p ia.PlacementProvider, e []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) {
	return ia.ActivateGroup(p, ia.NewEventTargetProcess(-1, 0), e)
}

func (iaEventsActivator) activateMulti(a ia.MultiActivator, p []ia.PlacementProvider, o ia.Options) (*ia.ActiveMultiEvent, error) {
	return a.ActivateMulti(p, ia.NewEventTargetProcess(-1, 0), o)
}

type entitiesActivator interface {
	activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error
}

type iaEntitiesActivator struct {
	placementMaker placementMaker
	perfActivator  eventsActivator
}

func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
	for _, coreEventsEntity := range coreEntities {
		err := ea.activateCoreEvents(coreEventsEntity)
		if err != nil {
			return fmt.Errorf("failed to activate core events `%s`: %v", coreEventsEntity.EventsTag, err)
		}
	}
	for _, uncoreEventsEntity := range uncoreEntities {
		err := ea.activateUncoreEvents(uncoreEventsEntity)
		if err != nil {
			return fmt.Errorf("failed to activate uncore events `%s`: %v", uncoreEventsEntity.EventsTag, err)
		}
	}
	return nil
}

func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error {
	if entity == nil {
		return fmt.Errorf("core events entity is nil")
	}
	if ea.placementMaker == nil {
		return fmt.Errorf("placement maker is nil")
	}
	if entity.PerfGroup {
		err := ea.activateCoreEventsGroup(entity)
		if err != nil {
			return fmt.Errorf("failed to activate core events group: %v", err)
		}
	} else {
		for _, event := range entity.parsedEvents {
			if event == nil {
				return fmt.Errorf("core parsed event is nil")
			}
			placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event)
			if err != nil {
				return fmt.Errorf("failed to create core placements for event `%s`: %v", event.name, err)
			}
			activeEvent, err := ea.activateEventForPlacements(event, placements)
			if err != nil {
				return fmt.Errorf("failed to activate core event `%s`: %v", event.name, err)
			}
			entity.activeEvents = append(entity.activeEvents, activeEvent...)
		}
	}
	return nil
}

func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) error {
	if entity == nil {
		return fmt.Errorf("uncore events entity is nil")
	}
	if ea.perfActivator == nil || ea.placementMaker == nil {
		return fmt.Errorf("events activator or placement maker is nil")
	}
	for _, event := range entity.parsedEvents {
		if event == nil {
			return fmt.Errorf("uncore parsed event is nil")
		}
		perfEvent := event.custom.Event
		if perfEvent == nil {
			return fmt.Errorf("perf event of `%s` event is nil", event.name)
		}
		options := event.custom.Options

		for _, socket := range entity.parsedSockets {
			placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent)
			if err != nil {
				return fmt.Errorf("failed to create uncore placements for event `%s`: %v", event.name, err)
			}
			activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options)
			if err != nil {
				return fmt.Errorf("failed to activate multi event `%s`: %v", event.name, err)
			}
			events := activeMultiEvent.Events()
			entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket})
		}
	}
	return nil
}

func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity) error {
	if ea.perfActivator == nil || ea.placementMaker == nil {
		return fmt.Errorf("missing perf activator or placement maker")
	}
	if entity == nil || len(entity.parsedEvents) < 1 {
		return fmt.Errorf("missing parsed events")
	}

	var events []ia.CustomizableEvent
	for _, event := range entity.parsedEvents {
		if event == nil {
			return fmt.Errorf("core event is nil")
		}
		events = append(events, event.custom)
	}
	leader := entity.parsedEvents[0].custom

	placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event)
	if err != nil {
		return fmt.Errorf("failed to make core placements: %v", err)
	}

	for _, plc := range placements {
		activeGroup, err := ea.perfActivator.activateGroup(plc, events)
		if err != nil {
			return err
		}
		entity.activeEvents = append(entity.activeEvents, activeGroup.Events()...)
	}
	return nil
}

func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, placements []ia.PlacementProvider) ([]*ia.ActiveEvent, error) {
	if event == nil {
		return nil, fmt.Errorf("core event is nil")
	}
	if ea.perfActivator == nil {
		return nil, fmt.Errorf("missing perf activator")
	}
	var activeEvents []*ia.ActiveEvent
	for _, placement := range placements {
		perfEvent := event.custom.Event
		options := event.custom.Options

		activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options)
		if err != nil {
			return nil, fmt.Errorf("failed to activate event `%s`: %v", event.name, err)
		}
		activeEvents = append(activeEvents, activeEvent)
	}
	return activeEvents, nil
}
@@ -0,0 +1,432 @@

//go:build linux && amd64
// +build linux,amd64

package intel_pmu

import (
	"errors"
	"fmt"
	"testing"

	ia "github.com/intel/iaevents"
	"github.com/stretchr/testify/require"
)

type mockPlacementFactory struct {
	err bool
}

func (m *mockPlacementFactory) NewPlacements(_ string, cpu int, cpus ...int) ([]ia.PlacementProvider, error) {
	if m.err {
		return nil, errors.New("mock error")
	}
	placements := []ia.PlacementProvider{
		&ia.Placement{CPU: cpu, PMUType: 4},
	}
	for _, cpu := range cpus {
		placements = append(placements, &ia.Placement{CPU: cpu, PMUType: 4})
	}
	return placements, nil
}

func TestActivateEntities(t *testing.T) {
	mEntitiesActivator := &iaEntitiesActivator{}

	// more core test cases in TestActivateCoreEvents
	t.Run("failed to activate core events", func(t *testing.T) {
		tag := "TAG"
		mEntities := []*CoreEventEntity{{EventsTag: tag}}
		err := mEntitiesActivator.activateEntities(mEntities, nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events `%s`", tag))
	})

	// more uncore test cases in TestActivateUncoreEvents
	t.Run("failed to activate uncore events", func(t *testing.T) {
		tag := "TAG"
		mEntities := []*UncoreEventEntity{{EventsTag: tag}}
		err := mEntitiesActivator.activateEntities(nil, mEntities)
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events `%s`", tag))
	})

	t.Run("nothing to do", func(t *testing.T) {
		err := mEntitiesActivator.activateEntities(nil, nil)
		require.NoError(t, err)
	})
}

func TestActivateUncoreEvents(t *testing.T) {
	mActivator := &mockEventsActivator{}
	mMaker := &mockPlacementMaker{}
	errMock := fmt.Errorf("error mock")

	t.Run("entity is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		err := mEntitiesActivator.activateUncoreEvents(nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "uncore events entity is nil")
	})

	t.Run("event is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}}
		err := mEntitiesActivator.activateUncoreEvents(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), "uncore parsed event is nil")
	})

	t.Run("perf event is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		name := "event name"
		mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}}
		err := mEntitiesActivator.activateUncoreEvents(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("perf event of `%s` event is nil", name))
	})

	t.Run("placement maker and perf activator is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: nil}
		err := mEntitiesActivator.activateUncoreEvents(&UncoreEventEntity{})
		require.Error(t, err)
		require.Contains(t, err.Error(), "events activator or placement maker is nil")
	})

	t.Run("failed to create placements", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		eventName := "mock event 1"
		parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}}
		mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}}

		mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(nil, errMock).Once()
		err := mEntitiesActivator.activateUncoreEvents(mEntity)

		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to create uncore placements for event `%s`", eventName))
		mMaker.AssertExpectations(t)
	})

	t.Run("failed to activate event", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		eventName := "mock event 1"
		parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}}
		placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}}
		mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}}

		mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(placements, nil).Once()
		mActivator.On("activateMulti", parsedEvents[0].custom.Event, placements, parsedEvents[0].custom.Options).Return(nil, errMock).Once()

		err := mEntitiesActivator.activateUncoreEvents(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event `%s`", eventName))
		mMaker.AssertExpectations(t)
		mActivator.AssertExpectations(t)
	})

	t.Run("successfully activate uncore events", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}

		parsedEvents := []*eventWithQuals{
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1", Uncore: true}}},
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2", Uncore: true}}},
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3", Uncore: true}}},
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4", Uncore: true}}},
		}
		mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}}
		placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}, &ia.Placement{}}

		var expectedEvents []multiEvent
		for _, event := range parsedEvents {
			for _, socket := range mEntity.parsedSockets {
				mMaker.On("makeUncorePlacements", event.custom.Event, socket).Return(placements, nil).Once()
				newActiveMultiEvent := &ia.ActiveMultiEvent{}
				expectedEvents = append(expectedEvents, multiEvent{newActiveMultiEvent.Events(), event.custom.Event, socket})
				mActivator.On("activateMulti", event.custom.Event, placements, event.custom.Options).Return(newActiveMultiEvent, nil).Once()
			}
		}
		err := mEntitiesActivator.activateUncoreEvents(mEntity)

		require.NoError(t, err)
		require.Equal(t, expectedEvents, mEntity.activeMultiEvents)
		mMaker.AssertExpectations(t)
		mActivator.AssertExpectations(t)
	})
}

func TestActivateCoreEvents(t *testing.T) {
	mMaker := &mockPlacementMaker{}
	mActivator := &mockEventsActivator{}
	errMock := fmt.Errorf("error mock")

	t.Run("entity is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		err := mEntitiesActivator.activateCoreEvents(nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "core events entity is nil")
	})

	t.Run("placement maker is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: mActivator}
		err := mEntitiesActivator.activateCoreEvents(&CoreEventEntity{})
		require.Error(t, err)
		require.Contains(t, err.Error(), "placement maker is nil")
	})

	t.Run("event is nil", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		mEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}}
		err := mEntitiesActivator.activateCoreEvents(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), "core parsed event is nil")
	})

	t.Run("failed to create placements", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
		parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}}
		mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}}

		mMaker.On("makeCorePlacements", mEntity.parsedCores, parsedEvents[0].custom.Event).Return(nil, errMock).Once()
		err := mEntitiesActivator.activateCoreEvents(mEntity)

		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event `%s`", parsedEvents[0].name))
		mMaker.AssertExpectations(t)
	})

	t.Run("failed to activate event", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}

		parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}}
		placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}}
		mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}}

		event := parsedEvents[0]
		plc := placements[0]
		mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once()
		mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(nil, errMock).Once()

		err := mEntitiesActivator.activateCoreEvents(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event `%s`", parsedEvents[0].name))
		mMaker.AssertExpectations(t)
		mActivator.AssertExpectations(t)
	})

	t.Run("failed to activate core events group", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: nil}
		mEntity := &CoreEventEntity{PerfGroup: true, parsedEvents: nil}

		err := mEntitiesActivator.activateCoreEvents(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), "failed to activate core events group")
	})

	t.Run("successfully activate core events", func(t *testing.T) {
		mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}

		parsedEvents := []*eventWithQuals{
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}},
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2"}}},
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3"}}},
			{custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4"}}},
		}
		placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}, &ia.Placement{CPU: 2}}
		mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}}

		var activeEvents []*ia.ActiveEvent
		for _, event := range parsedEvents {
			mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once()
			for _, plc := range placements {
				newActiveEvent := &ia.ActiveEvent{PerfEvent: event.custom.Event}
				activeEvents = append(activeEvents, newActiveEvent)
				mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(newActiveEvent, nil).Once()
			}
		}

		err := mEntitiesActivator.activateCoreEvents(mEntity)
		require.NoError(t, err)
		require.Equal(t, activeEvents, mEntity.activeEvents)
		mMaker.AssertExpectations(t)
		mActivator.AssertExpectations(t)
	})
}

func TestActivateCoreEventsGroup(t *testing.T) {
	mMaker := &mockPlacementMaker{}
	mActivator := &mockEventsActivator{}
	eActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator}
	errMock := errors.New("mock error")

	leader := &ia.PerfEvent{Name: "mock event 1"}
	perfEvent2 := &ia.PerfEvent{Name: "mock event 2"}

	parsedEvents := []*eventWithQuals{{custom: ia.CustomizableEvent{Event: leader}}, {custom: ia.CustomizableEvent{Event: perfEvent2}}}
	placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}}

	// cannot populate this struct due to unexported events field
	activeGroup := &ia.ActiveEventGroup{}

	mEntity := &CoreEventEntity{
		EventsTag:    "mock group",
		PerfGroup:    true,
		parsedEvents: parsedEvents,
		parsedCores:  nil,
	}

	var events []ia.CustomizableEvent
	for _, event := range parsedEvents {
		events = append(events, event.custom)
	}

	t.Run("missing perf activator and placement maker", func(t *testing.T) {
		mActivator := &iaEntitiesActivator{}
		err := mActivator.activateCoreEventsGroup(nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "missing perf activator or placement maker")
	})

	t.Run("missing parsed events", func(t *testing.T) {
		mActivator := &iaEntitiesActivator{placementMaker: &mockPlacementMaker{}, perfActivator: &mockEventsActivator{}}
		err := mActivator.activateCoreEventsGroup(nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "missing parsed events")
	})

	t.Run("nil in parsed event", func(t *testing.T) {
		mEntity := &CoreEventEntity{EventsTag: "Nice tag", PerfGroup: true, parsedEvents: []*eventWithQuals{nil, nil}}
		err := eActivator.activateCoreEventsGroup(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), "core event is nil")
	})

	t.Run("failed to make core placements", func(t *testing.T) {
		mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(nil, errMock).Once()
		err := eActivator.activateCoreEventsGroup(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), "failed to make core placements")
		mMaker.AssertExpectations(t)
	})

	t.Run("failed to activate group", func(t *testing.T) {
		mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once()
		mActivator.On("activateGroup", placements[0], events).Return(nil, errMock).Once()

		err := eActivator.activateCoreEventsGroup(mEntity)
		require.Error(t, err)
		require.Contains(t, err.Error(), errMock.Error())
		mMaker.AssertExpectations(t)
		mActivator.AssertExpectations(t)
	})

	var allActive []*ia.ActiveEvent
	t.Run("successfully activated group", func(t *testing.T) {
		mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once()
		for _, plc := range placements {
			mActivator.On("activateGroup", plc, events).Return(activeGroup, nil).Once()
			allActive = append(allActive, activeGroup.Events()...)
		}

		err := eActivator.activateCoreEventsGroup(mEntity)
		require.NoError(t, err)
		require.Equal(t, allActive, mEntity.activeEvents)
		mMaker.AssertExpectations(t)
		mActivator.AssertExpectations(t)
	})
}

func TestMakeCorePlacements(t *testing.T) {
	tests := []struct {
		name      string
		cores     []int
		perfEvent ia.PlacementFactory
		result    []ia.PlacementProvider
		errMsg    string
	}{
		{"no cores", nil, &ia.PerfEvent{}, nil, "no cores provided"},
		{"one core placement", []int{1}, &mockPlacementFactory{}, []ia.PlacementProvider{&ia.Placement{CPU: 1, PMUType: 4}}, ""},
		{"multiple core placement", []int{1, 2, 4}, &mockPlacementFactory{}, []ia.PlacementProvider{
			&ia.Placement{CPU: 1, PMUType: 4},
			&ia.Placement{CPU: 2, PMUType: 4},
			&ia.Placement{CPU: 4, PMUType: 4}},
			""},
		{"placement factory error", []int{1}, &mockPlacementFactory{true}, nil, "mock error"},
		{"placement factory error 2", []int{1, 2, 3}, &mockPlacementFactory{true}, nil, "mock error"},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			maker := &iaPlacementMaker{}
			providers, err := maker.makeCorePlacements(test.cores, test.perfEvent)
			if len(test.errMsg) > 0 {
				require.Error(t, err)
				require.Nil(t, providers)
				require.Contains(t, err.Error(), test.errMsg)
				return
			}
			require.NoError(t, err)
			require.Equal(t, test.result, providers)
		})
	}
}

func TestActivateEventForPlacement(t *testing.T) {
	placement1 := &ia.Placement{CPU: 0}
	placement2 := &ia.Placement{CPU: 1}
	placement3 := &ia.Placement{CPU: 2}

	mPlacements := []ia.PlacementProvider{placement1, placement2, placement3}

	mPerfEvent := &ia.PerfEvent{Name: "mock1"}
	mOptions := &ia.PerfEventOptions{}
	mEvent := &eventWithQuals{name: mPerfEvent.Name, custom: ia.CustomizableEvent{Event: mPerfEvent, Options: mOptions}}

	mPerfActivator := &mockEventsActivator{}
	mActivator := &iaEntitiesActivator{perfActivator: mPerfActivator}

	t.Run("event is nil", func(t *testing.T) {
		activeEvents, err := mActivator.activateEventForPlacements(nil, mPlacements)
		require.Error(t, err)
		require.Contains(t, err.Error(), "core event is nil")
		require.Nil(t, activeEvents)
	})

	t.Run("perf activator is nil", func(t *testing.T) {
		mActivator := &iaEntitiesActivator{}
		activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements)
		require.Error(t, err)
		require.Contains(t, err.Error(), "missing perf activator")
		require.Nil(t, activeEvents)
	})

	t.Run("placements are nil", func(t *testing.T) {
		activeEvents, err := mActivator.activateEventForPlacements(mEvent, nil)
		require.NoError(t, err)
		require.Nil(t, activeEvents)
	})

	t.Run("activation error", func(t *testing.T) {
		mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err"))
		activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements)
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event `%s`", mEvent.name))
		require.Nil(t, activeEvents)
		mPerfActivator.AssertExpectations(t)
	})

	t.Run("successfully activated", func(t *testing.T) {
		mActiveEvent := &ia.ActiveEvent{}
		mActiveEvent2 := &ia.ActiveEvent{}
		mActiveEvent3 := &ia.ActiveEvent{}

		mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(mActiveEvent, nil).
			On("activateEvent", mPerfEvent, placement2, mOptions).Once().Return(mActiveEvent2, nil).
			On("activateEvent", mPerfEvent, placement3, mOptions).Once().Return(mActiveEvent3, nil)

		activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements)
		require.NoError(t, err)
		require.Len(t, activeEvents, len(mPlacements))
		require.Contains(t, activeEvents, mActiveEvent)
		require.Contains(t, activeEvents, mActiveEvent2)
		mPerfActivator.AssertExpectations(t)
	})
}
@ -0,0 +1,239 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Maximum size of core IDs or socket IDs (8192). Based on maximum value of CPUs that linux kernel supports.
|
||||||
|
const maxIDsSize = 1 << 13
|
||||||
|
|
||||||
|
type entitiesParser interface {
|
||||||
|
parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type configParser struct {
|
||||||
|
log telegraf.Logger
|
||||||
|
sys sysInfoProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) {
|
||||||
|
if len(coreEntities) == 0 && len(uncoreEntities) == 0 {
|
||||||
|
return fmt.Errorf("neither core nor uncore entities configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, coreEntity := range coreEntities {
|
||||||
|
if coreEntity == nil {
|
||||||
|
return fmt.Errorf("core entity is nil")
|
||||||
|
}
|
||||||
|
if coreEntity.Events == nil {
|
||||||
|
if cp.log != nil {
|
||||||
|
cp.log.Debug("all core events from provided files will be configured")
|
||||||
|
}
|
||||||
|
coreEntity.allEvents = true
|
||||||
|
} else {
|
||||||
|
events := cp.parseEvents(coreEntity.Events)
|
||||||
|
if events == nil {
|
||||||
|
return fmt.Errorf("an empty list of core events was provided")
|
||||||
|
}
|
||||||
|
coreEntity.parsedEvents = events
|
||||||
|
}
|
||||||
|
|
||||||
|
coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during cores parsing: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, uncoreEntity := range uncoreEntities {
|
||||||
|
if uncoreEntity == nil {
|
||||||
|
return fmt.Errorf("uncore entity is nil")
|
||||||
|
}
|
||||||
|
if uncoreEntity.Events == nil {
|
||||||
|
if cp.log != nil {
|
||||||
|
cp.log.Debug("all uncore events from provided files will be configured")
|
||||||
|
}
|
||||||
|
uncoreEntity.allEvents = true
|
||||||
|
} else {
|
||||||
|
events := cp.parseEvents(uncoreEntity.Events)
|
||||||
|
if events == nil {
|
||||||
|
return fmt.Errorf("an empty list of uncore events was provided")
|
||||||
|
}
|
||||||
|
uncoreEntity.parsedEvents = events
|
||||||
|
}
|
||||||
|
|
||||||
|
uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during sockets parsing: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cp *configParser) parseEvents(events []string) []*eventWithQuals {
|
||||||
|
if len(events) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
events, duplications := removeDuplicateStrings(events)
|
||||||
|
for _, duplication := range duplications {
|
||||||
|
if cp.log != nil {
|
||||||
|
cp.log.Warnf("duplicated event `%s` will be removed", duplication)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return parseEventsWithQualifiers(events)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cp *configParser) parseCores(cores []string) ([]int, error) {
|
||||||
|
if cores == nil {
|
||||||
|
if cp.log != nil {
|
||||||
|
cp.log.Debug("all possible cores will be configured")
|
||||||
|
}
|
||||||
|
if cp.sys == nil {
|
||||||
|
return nil, fmt.Errorf("system info provider is nil")
|
||||||
|
}
|
||||||
|
cores, err := cp.sys.allCPUs()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot obtain all cpus: %v", err)
|
||||||
|
}
|
||||||
|
return cores, nil
|
||||||
|
}
|
||||||
|
if len(cores) == 0 {
|
||||||
|
return nil, fmt.Errorf("an empty list of cores was provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := cp.parseIntRanges(cores)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cp *configParser) parseSockets(sockets []string) ([]int, error) {
|
||||||
|
if sockets == nil {
|
||||||
|
if cp.log != nil {
|
||||||
|
cp.log.Debug("all possible sockets will be configured")
|
||||||
|
}
|
||||||
|
if cp.sys == nil {
|
||||||
|
return nil, fmt.Errorf("system info provider is nil")
|
||||||
|
}
|
||||||
|
sockets, err := cp.sys.allSockets()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot obtain all sockets: %v", err)
|
||||||
|
}
|
||||||
|
return sockets, nil
|
||||||
|
}
|
||||||
|
if len(sockets) == 0 {
|
||||||
|
return nil, fmt.Errorf("an empty list of sockets was provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := cp.parseIntRanges(sockets)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cp *configParser) parseIntRanges(ranges []string) ([]int, error) {
|
||||||
|
var ids []int
|
||||||
|
var duplicatedIDs []int
|
||||||
|
var err error
|
||||||
|
ids, err = parseIDs(ranges)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids, duplicatedIDs = removeDuplicateValues(ids)
|
||||||
|
for _, duplication := range duplicatedIDs {
|
||||||
|
if cp.log != nil {
|
||||||
|
cp.log.Warnf("duplicated id number `%d` will be removed", duplication)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseEventsWithQualifiers(events []string) []*eventWithQuals {
|
||||||
|
var result []*eventWithQuals
|
||||||
|
|
||||||
|
for _, event := range events {
|
||||||
|
newEventWithQualifiers := &eventWithQuals{}
|
||||||
|
|
||||||
|
split := strings.Split(event, ":")
|
||||||
|
newEventWithQualifiers.name = split[0]
|
||||||
|
|
||||||
|
if len(split) > 1 {
|
||||||
|
newEventWithQualifiers.qualifiers = split[1:]
|
||||||
|
}
|
||||||
|
result = append(result, newEventWithQualifiers)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
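// parseIDs expands comma-separated values and a-b ranges into individual IDs. It returns an error for malformed entries, for reversed ranges, and when the total number of IDs exceeds maxIDsSize.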
func parseIDs(allIDsStrings []string) ([]int, error) {
|
||||||
|
var result []int
|
||||||
|
for _, idsString := range allIDsStrings {
|
||||||
|
ids := strings.Split(idsString, ",")
|
||||||
|
|
||||||
|
for _, id := range ids {
|
||||||
|
id := strings.TrimSpace(id)
|
||||||
|
// support for range notation, e.g. a-b
|
||||||
|
var start, end uint
|
||||||
|
n, err := fmt.Sscanf(id, "%d-%d", &start, &end)
|
||||||
|
if err == nil && n == 2 {
|
||||||
|
if start >= end {
|
||||||
|
return nil, fmt.Errorf("`%d` is equal or greater than `%d`", start, end)
|
||||||
|
}
|
||||||
|
for ; start <= end; start++ {
|
||||||
|
if len(result)+1 > maxIDsSize {
|
||||||
|
return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize)
|
||||||
|
}
|
||||||
|
result = append(result, int(start))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Single value
|
||||||
|
num, err := strconv.Atoi(id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("wrong format for id number `%s`: %v", id, err)
|
||||||
|
}
|
||||||
|
if len(result)+1 > maxIDsSize {
|
||||||
|
return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize)
|
||||||
|
}
|
||||||
|
result = append(result, num)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
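// removeDuplicateValues returns intSlice with duplicated entries removed, together with the list of duplicates that were dropped.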
func removeDuplicateValues(intSlice []int) (result []int, duplicates []int) {
|
||||||
|
keys := make(map[int]bool)
|
||||||
|
|
||||||
|
for _, entry := range intSlice {
|
||||||
|
if _, value := keys[entry]; !value {
|
||||||
|
keys[entry] = true
|
||||||
|
result = append(result, entry)
|
||||||
|
} else {
|
||||||
|
duplicates = append(duplicates, entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result, duplicates
|
||||||
|
}
|
||||||
|
|
||||||
|
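// removeDuplicateStrings returns strSlice with duplicated entries removed, together with the list of duplicates that were dropped.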
func removeDuplicateStrings(strSlice []string) (result []string, duplicates []string) {
|
||||||
|
keys := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, entry := range strSlice {
|
||||||
|
if _, value := keys[entry]; !value {
|
||||||
|
keys[entry] = true
|
||||||
|
result = append(result, entry)
|
||||||
|
} else {
|
||||||
|
duplicates = append(duplicates, entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result, duplicates
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,230 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConfigParser_parseEntities(t *testing.T) {
|
||||||
|
mSysInfo := &mockSysInfoProvider{}
|
||||||
|
mConfigParser := &configParser{
|
||||||
|
sys: mSysInfo,
|
||||||
|
log: testutil.Logger{},
|
||||||
|
}
|
||||||
|
e := ia.CustomizableEvent{}
|
||||||
|
|
||||||
|
t.Run("no entities", func(t *testing.T) {
|
||||||
|
err := mConfigParser.parseEntities(nil, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "neither core nor uncore entities configured")
|
||||||
|
})
|
||||||
|
|
||||||
|
// More specific parsing cases are covered in TestConfigParser_parseIntRanges and TestConfigParser_parseEvents.
|
||||||
|
coreTests := []struct {
|
||||||
|
name string
|
||||||
|
|
||||||
|
coreEntity *CoreEventEntity
|
||||||
|
parsedCoreEvents []*eventWithQuals
|
||||||
|
parsedCores []int
|
||||||
|
coreAll bool
|
||||||
|
|
||||||
|
uncoreEntity *UncoreEventEntity
|
||||||
|
parsedUncoreEvents []*eventWithQuals
|
||||||
|
parsedSockets []int
|
||||||
|
uncoreAll bool
|
||||||
|
|
||||||
|
failMsg string
|
||||||
|
}{
|
||||||
|
{"no events provided",
|
||||||
|
&CoreEventEntity{Events: nil, Cores: []string{"1"}}, nil, []int{1}, true,
|
||||||
|
&UncoreEventEntity{Events: nil, Sockets: []string{"0"}}, nil, []int{0}, true,
|
||||||
|
""},
|
||||||
|
{"uncore entity is nil",
|
||||||
|
&CoreEventEntity{Events: []string{"EVENT"}, Cores: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false,
|
||||||
|
nil, nil, nil, false,
|
||||||
|
"uncore entity is nil"},
|
||||||
|
{"core entity is nil",
|
||||||
|
nil, nil, nil, false,
|
||||||
|
&UncoreEventEntity{Events: []string{"EVENT"}, Sockets: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false,
|
||||||
|
"core entity is nil"},
|
||||||
|
{"error parsing sockets",
|
||||||
|
&CoreEventEntity{Events: nil, Cores: []string{"1,2"}}, nil, []int{1, 2}, true,
|
||||||
|
&UncoreEventEntity{Events: []string{"E"}, Sockets: []string{"wrong sockets"}}, []*eventWithQuals{{"E", nil, e}}, nil, false,
|
||||||
|
"error during sockets parsing"},
|
||||||
|
{"error parsing cores",
|
||||||
|
&CoreEventEntity{Events: nil, Cores: []string{"wrong cpus"}}, nil, nil, true,
|
||||||
|
&UncoreEventEntity{Events: nil, Sockets: []string{"0,1"}}, nil, []int{0, 1}, true,
|
||||||
|
"error during cores parsing"},
|
||||||
|
{"valid settings",
|
||||||
|
&CoreEventEntity{Events: []string{"E1", "E2:config=123"}, Cores: []string{"1-5"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", []string{"config=123"}, e}}, []int{1, 2, 3, 4, 5}, false,
|
||||||
|
&UncoreEventEntity{Events: []string{"E1", "E2", "E3"}, Sockets: []string{"0,2-6"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", nil, e}, {"E3", nil, e}}, []int{0, 2, 3, 4, 5, 6}, false,
|
||||||
|
""},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range coreTests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
coreEntities := []*CoreEventEntity{test.coreEntity}
|
||||||
|
uncoreEntities := []*UncoreEventEntity{test.uncoreEntity}
|
||||||
|
|
||||||
|
err := mConfigParser.parseEntities(coreEntities, uncoreEntities)
|
||||||
|
|
||||||
|
if len(test.failMsg) > 0 {
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), test.failMsg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.coreAll, test.coreEntity.allEvents)
|
||||||
|
require.Equal(t, test.parsedCores, test.coreEntity.parsedCores)
|
||||||
|
require.Equal(t, test.parsedCoreEvents, test.coreEntity.parsedEvents)
|
||||||
|
|
||||||
|
require.Equal(t, test.uncoreAll, test.uncoreEntity.allEvents)
|
||||||
|
require.Equal(t, test.parsedSockets, test.uncoreEntity.parsedSockets)
|
||||||
|
require.Equal(t, test.parsedUncoreEvents, test.uncoreEntity.parsedEvents)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigParser_parseCores(t *testing.T) {
|
||||||
|
mSysInfo := &mockSysInfoProvider{}
|
||||||
|
mConfigParser := &configParser{
|
||||||
|
sys: mSysInfo,
|
||||||
|
log: testutil.Logger{},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("no cores provided", func(t *testing.T) {
|
||||||
|
t.Run("system info provider is nil", func(t *testing.T) {
|
||||||
|
result, err := (&configParser{}).parseCores(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "system info provider is nil")
|
||||||
|
require.Nil(t, result)
|
||||||
|
})
|
||||||
|
t.Run("cannot gather all cpus info", func(t *testing.T) {
|
||||||
|
mSysInfo.On("allCPUs").Return(nil, errors.New("all cpus error")).Once()
|
||||||
|
result, err := mConfigParser.parseCores(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "cannot obtain all cpus")
|
||||||
|
require.Nil(t, result)
|
||||||
|
mSysInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
t.Run("all cpus gathering succeeded", func(t *testing.T) {
|
||||||
|
allCPUs := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
|
||||||
|
|
||||||
|
mSysInfo.On("allCPUs").Return(allCPUs, nil).Once()
|
||||||
|
result, err := mConfigParser.parseCores(nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, allCPUs, result)
|
||||||
|
mSysInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigParser_parseSockets(t *testing.T) {
|
||||||
|
mSysInfo := &mockSysInfoProvider{}
|
||||||
|
mConfigParser := &configParser{
|
||||||
|
sys: mSysInfo,
|
||||||
|
log: testutil.Logger{},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("no sockets provided", func(t *testing.T) {
|
||||||
|
t.Run("system info provider is nil", func(t *testing.T) {
|
||||||
|
result, err := (&configParser{}).parseSockets(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "system info provider is nil")
|
||||||
|
require.Nil(t, result)
|
||||||
|
})
|
||||||
|
t.Run("cannot gather all sockets info", func(t *testing.T) {
|
||||||
|
mSysInfo.On("allSockets").Return(nil, errors.New("all sockets error")).Once()
|
||||||
|
result, err := mConfigParser.parseSockets(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "cannot obtain all sockets")
|
||||||
|
require.Nil(t, result)
|
||||||
|
mSysInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
t.Run("all cpus gathering succeeded", func(t *testing.T) {
|
||||||
|
allSockets := []int{0, 1, 2, 3, 4}
|
||||||
|
|
||||||
|
mSysInfo.On("allSockets").Return(allSockets, nil).Once()
|
||||||
|
result, err := mConfigParser.parseSockets(nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, allSockets, result)
|
||||||
|
mSysInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigParser_parseEvents(t *testing.T) {
|
||||||
|
mConfigParser := &configParser{log: testutil.Logger{}}
|
||||||
|
e := ia.CustomizableEvent{}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input []string
|
||||||
|
result []*eventWithQuals
|
||||||
|
}{
|
||||||
|
{"no events", nil, nil},
|
||||||
|
{"single string", []string{"mock string"}, []*eventWithQuals{{"mock string", nil, e}}},
|
||||||
|
{"two events", []string{"EVENT.FIRST", "EVENT.SECOND"}, []*eventWithQuals{{"EVENT.FIRST", nil, e}, {"EVENT.SECOND", nil, e}}},
|
||||||
|
{"event with configs", []string{"EVENT.SECOND:config1=0x404300k:config2=0x404300k"},
|
||||||
|
[]*eventWithQuals{{"EVENT.SECOND", []string{"config1=0x404300k", "config2=0x404300k"}, e}}},
|
||||||
|
{"two events with modifiers", []string{"EVENT.FIRST:config1=0x200300:config2=0x231100:u:H", "EVENT.SECOND:K:p"},
|
||||||
|
[]*eventWithQuals{{"EVENT.FIRST", []string{"config1=0x200300", "config2=0x231100", "u", "H"}, e}, {"EVENT.SECOND", []string{"K", "p"}, e}}},
|
||||||
|
{"duplicates", []string{"EVENT1", "EVENT1", "EVENT2"}, []*eventWithQuals{{"EVENT1", nil, e}, {"EVENT2", nil, e}}},
|
||||||
|
{"duplicates with different configs", []string{"EVENT1:config1", "EVENT1:config2"},
|
||||||
|
[]*eventWithQuals{{"EVENT1", []string{"config1"}, e}, {"EVENT1", []string{"config2"}, e}}},
|
||||||
|
{"duplicates with the same modifiers", []string{"EVENT1:config1", "EVENT1:config1"},
|
||||||
|
[]*eventWithQuals{{"EVENT1", []string{"config1"}, e}}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
result := mConfigParser.parseEvents(test.input)
|
||||||
|
require.Equal(t, test.result, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigParser_parseIntRanges(t *testing.T) {
|
||||||
|
mConfigParser := &configParser{log: testutil.Logger{}}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input []string
|
||||||
|
result []int
|
||||||
|
failMsg string
|
||||||
|
}{
|
||||||
|
{"coma separated", []string{"0,1,2,3,4"}, []int{0, 1, 2, 3, 4}, ""},
|
||||||
|
{"range", []string{"0-10"}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, ""},
|
||||||
|
{"mixed", []string{"0-3", "4", "12-16"}, []int{0, 1, 2, 3, 4, 12, 13, 14, 15, 16}, ""},
|
||||||
|
{"min and max values", []string{"-2147483648", "2147483647"}, []int{math.MinInt32, math.MaxInt32}, ""},
|
||||||
|
{"should remove duplicates", []string{"1-5", "2-6"}, []int{1, 2, 3, 4, 5, 6}, ""},
|
||||||
|
{"wrong format", []string{"1,2,3%$S,-100"}, nil, "wrong format for id"},
|
||||||
|
{"start is greater than end", []string{"10-3"}, nil, "`10` is equal or greater than `3"},
|
||||||
|
{"too big value", []string{"18446744073709551615"}, nil, "wrong format for id"},
|
||||||
|
{"too much numbers", []string{fmt.Sprintf("0-%d", maxIDsSize)}, nil,
|
||||||
|
fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)},
|
||||||
|
{"too much numbers mixed", []string{fmt.Sprintf("1-%d", maxIDsSize), "0"}, nil,
|
||||||
|
fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
result, err := mConfigParser.parseIntRanges(test.input)
|
||||||
|
require.Equal(t, test.result, result)
|
||||||
|
if len(test.failMsg) > 0 {
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), test.failMsg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,477 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Linux availability: https://www.kernel.org/doc/Documentation/sysctl/fs.txt
|
||||||
|
const fileMaxPath = "/proc/sys/fs/file-max"
|
||||||
|
|
||||||
|
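// fileInfoProvider gives access to file contents, file metadata (lstat) and the process limit of open files.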
type fileInfoProvider interface {
|
||||||
|
readFile(string) ([]byte, error)
|
||||||
|
lstat(string) (os.FileInfo, error)
|
||||||
|
fileLimit() (uint64, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileHelper struct{}
|
||||||
|
|
||||||
|
func (fileHelper) readFile(path string) ([]byte, error) {
|
||||||
|
return ioutil.ReadFile(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fileHelper) lstat(path string) (os.FileInfo, error) {
|
||||||
|
return os.Lstat(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fileHelper) fileLimit() (uint64, error) {
|
||||||
|
var rLimit syscall.Rlimit
|
||||||
|
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
|
||||||
|
return rLimit.Cur, err
|
||||||
|
}
|
||||||
|
|
||||||
|
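// sysInfoProvider exposes information about CPUs and sockets available in the system.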
type sysInfoProvider interface {
|
||||||
|
allCPUs() ([]int, error)
|
||||||
|
allSockets() ([]int, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type iaSysInfo struct{}
|
||||||
|
|
||||||
|
func (iaSysInfo) allCPUs() ([]int, error) {
|
||||||
|
return ia.AllCPUs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (iaSysInfo) allSockets() ([]int, error) {
|
||||||
|
return ia.AllSockets()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntelPMU is the plugin type.
|
||||||
|
type IntelPMU struct {
|
||||||
|
EventListPaths []string `toml:"event_definitions"`
|
||||||
|
CoreEntities []*CoreEventEntity `toml:"core_events"`
|
||||||
|
UncoreEntities []*UncoreEventEntity `toml:"uncore_events"`
|
||||||
|
|
||||||
|
Log telegraf.Logger `toml:"-"`
|
||||||
|
|
||||||
|
fileInfo fileInfoProvider
|
||||||
|
entitiesReader entitiesValuesReader
|
||||||
|
}
|
||||||
|
|
||||||
|
// CoreEventEntity represents config section for core events.
|
||||||
|
type CoreEventEntity struct {
|
||||||
|
Events []string `toml:"events"`
|
||||||
|
Cores []string `toml:"cores"`
|
||||||
|
EventsTag string `toml:"events_tag"`
|
||||||
|
PerfGroup bool `toml:"perf_group"`
|
||||||
|
|
||||||
|
parsedEvents []*eventWithQuals
|
||||||
|
parsedCores []int
|
||||||
|
allEvents bool
|
||||||
|
|
||||||
|
activeEvents []*ia.ActiveEvent
|
||||||
|
}
|
||||||
|
|
||||||
|
// UncoreEventEntity represents config section for uncore events.
|
||||||
|
type UncoreEventEntity struct {
|
||||||
|
Events []string `toml:"events"`
|
||||||
|
Sockets []string `toml:"sockets"`
|
||||||
|
Aggregate bool `toml:"aggregate_uncore_units"`
|
||||||
|
EventsTag string `toml:"events_tag"`
|
||||||
|
|
||||||
|
parsedEvents []*eventWithQuals
|
||||||
|
parsedSockets []int
|
||||||
|
allEvents bool
|
||||||
|
|
||||||
|
activeMultiEvents []multiEvent
|
||||||
|
}
|
||||||
|
|
||||||
|
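// multiEvent groups the active events created for a single uncore perf event on one socket.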
type multiEvent struct {
|
||||||
|
activeEvents []*ia.ActiveEvent
|
||||||
|
perfEvent *ia.PerfEvent
|
||||||
|
socket int
|
||||||
|
}
|
||||||
|
|
||||||
|
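// eventWithQuals holds a parsed event name and its qualifiers together with the corresponding customizable perf event.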
type eventWithQuals struct {
|
||||||
|
name string
|
||||||
|
qualifiers []string
|
||||||
|
|
||||||
|
custom ia.CustomizableEvent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IntelPMU) Description() string {
|
||||||
|
return "Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IntelPMU) SampleConfig() string {
|
||||||
|
return `
|
||||||
|
## List of filesystem locations of JSON files that contain PMU event definitions.
|
||||||
|
event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"]
|
||||||
|
|
||||||
|
## List of core events measurement entities. There can be more than one core_events section.
|
||||||
|
[[inputs.intel_pmu.core_events]]
|
||||||
|
## List of events to be counted. Event names shall match names from event_definitions files.
|
||||||
|
## A single entry can contain the name of the event (case insensitive), augmented with config options and perf modifiers.
|
||||||
|
## If absent, all core events from provided event_definitions are counted, skipping unresolvable ones.
|
||||||
|
events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"]
|
||||||
|
|
||||||
|
## Limits the counting of events to the specified core numbers.
|
||||||
|
## If absent, events are counted on all cores.
|
||||||
|
## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element.
|
||||||
|
## example: cores = ["0,2", "4", "12-16"]
|
||||||
|
cores = ["0"]
|
||||||
|
|
||||||
|
## Indicator that the plugin shall attempt to run core_events.events as a single perf group.
|
||||||
|
## If absent or set to false, each event is counted individually. Defaults to false.
|
||||||
|
## This limits the number of events that can be measured to the maximum number of available hardware counters per core.
|
||||||
|
## This could vary depending on the type of event and the use of fixed counters.
|
||||||
|
# perf_group = false
|
||||||
|
|
||||||
|
## Optionally set a custom tag value that will be added to every measurement within this events group.
|
||||||
|
## Can be applied to any group of events, unrelated to the perf_group setting.
|
||||||
|
# events_tag = ""
|
||||||
|
|
||||||
|
## List of uncore event measurement entities. There can be more than one uncore_events section.
|
||||||
|
[[inputs.intel_pmu.uncore_events]]
|
||||||
|
## List of events to be counted. Event names shall match names from event_definitions files.
|
||||||
|
## A single entry can contain the name of the event (case insensitive), augmented with config options and perf modifiers.
|
||||||
|
## If absent, all uncore events from provided event_definitions are counted, skipping unresolvable ones.
|
||||||
|
events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"]
|
||||||
|
|
||||||
|
## Limits the counting of events to the specified sockets.
|
||||||
|
## If absent, events are counted on all sockets.
|
||||||
|
## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element.
|
||||||
|
## example: sockets = ["0-2"]
|
||||||
|
sockets = ["0"]
|
||||||
|
|
||||||
|
## Indicator that the plugin shall provide an aggregated value for multiple units of the same type distributed in an uncore.
|
||||||
|
## If absent or set to false, events for each unit are exposed as separate metrics. Defaults to false.
|
||||||
|
# aggregate_uncore_units = false
|
||||||
|
|
||||||
|
## Optionally set a custom tag value that will be added to every measurement within this events group.
|
||||||
|
# events_tag = ""
|
||||||
|
`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start is required for IntelPMU to implement the telegraf.ServiceInput interface.
|
||||||
|
// Necessary initialization and config checking are done in Init.
|
||||||
|
func (IntelPMU) Start(_ telegraf.Accumulator) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IntelPMU) Init() error {
|
||||||
|
err := checkFiles(i.EventListPaths, i.fileInfo)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during event definitions paths validation: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
reader, err := newReader(i.EventListPaths)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
transformer := ia.NewPerfTransformer()
|
||||||
|
resolver := &iaEntitiesResolver{reader: reader, transformer: transformer, log: i.Log}
|
||||||
|
parser := &configParser{log: i.Log, sys: &iaSysInfo{}}
|
||||||
|
activator := &iaEntitiesActivator{perfActivator: &iaEventsActivator{}, placementMaker: &iaPlacementMaker{}}
|
||||||
|
|
||||||
|
i.entitiesReader = &iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: &realClock{}}
|
||||||
|
|
||||||
|
return i.initialization(parser, resolver, activator)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolver, activator entitiesActivator) error {
|
||||||
|
if parser == nil || resolver == nil || activator == nil {
|
||||||
|
return fmt.Errorf("entities parser and/or resolver and/or activator is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
err := parser.parseEntities(i.CoreEntities, i.UncoreEntities)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during parsing configuration sections: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during events resolving: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = i.checkFileDescriptors()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during file descriptors checking: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = activator.activateEntities(i.CoreEntities, i.UncoreEntities)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error during events activation: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
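// checkFileDescriptors verifies that the number of file descriptors required by all configured events exceeds neither the kernel-wide maximum nor the soft limit of open files for the process.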
func (i *IntelPMU) checkFileDescriptors() error {
|
||||||
|
coreFd, err := estimateCoresFd(i.CoreEntities)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to estimate number of core events file descriptors: %v", err)
|
||||||
|
}
|
||||||
|
uncoreFd, err := estimateUncoreFd(i.UncoreEntities)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %v", err)
|
||||||
|
}
|
||||||
|
if coreFd > math.MaxUint64-uncoreFd {
|
||||||
|
return fmt.Errorf("requested number of file descriptors exceeds uint64")
|
||||||
|
}
|
||||||
|
allFd := coreFd + uncoreFd
|
||||||
|
|
||||||
|
// maximum number of file descriptors enforced at the kernel level
|
||||||
|
maxFd, err := readMaxFD(i.fileInfo)
|
||||||
|
if err != nil {
|
||||||
|
i.Log.Warnf("cannot obtain number of available file descriptors: %v", err)
|
||||||
|
} else if allFd > maxFd {
|
||||||
|
return fmt.Errorf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+
|
||||||
|
": consider increasing the maximum number", allFd, maxFd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// soft limit of open files for the current process
|
||||||
|
limit, err := i.fileInfo.fileLimit()
|
||||||
|
if err != nil {
|
||||||
|
i.Log.Warnf("cannot obtain limit value of open files: %v", err)
|
||||||
|
} else if allFd > limit {
|
||||||
|
return fmt.Errorf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+
|
||||||
|
": consider increasing the limit", allFd, limit)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IntelPMU) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if i.entitiesReader == nil {
|
||||||
|
return fmt.Errorf("entities reader is nil")
|
||||||
|
}
|
||||||
|
coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read entities events values: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for id, m := range coreMetrics {
|
||||||
|
scaled := ia.EventScaledValue(m.values)
|
||||||
|
if !scaled.IsUint64() {
|
||||||
|
return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String())
|
||||||
|
}
|
||||||
|
coreMetrics[id].scaled = scaled.Uint64()
|
||||||
|
}
|
||||||
|
for id, m := range uncoreMetrics {
|
||||||
|
scaled := ia.EventScaledValue(m.values)
|
||||||
|
if !scaled.IsUint64() {
|
||||||
|
return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String())
|
||||||
|
}
|
||||||
|
uncoreMetrics[id].scaled = scaled.Uint64()
|
||||||
|
}
|
||||||
|
|
||||||
|
publishCoreMeasurements(coreMetrics, acc)
|
||||||
|
publishUncoreMeasurements(uncoreMetrics, acc)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IntelPMU) Stop() {
|
||||||
|
for _, entity := range i.CoreEntities {
|
||||||
|
if entity == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, event := range entity.activeEvents {
|
||||||
|
if event == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err := event.Deactivate()
|
||||||
|
if err != nil {
|
||||||
|
i.Log.Warnf("failed to deactivate core event `%s`: %v", event, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, entity := range i.UncoreEntities {
|
||||||
|
if entity == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, multi := range entity.activeMultiEvents {
|
||||||
|
for _, event := range multi.activeEvents {
|
||||||
|
if event == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err := event.Deactivate()
|
||||||
|
if err != nil {
|
||||||
|
i.Log.Warnf("failed to deactivate uncore event `%s`: %v", event, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
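// newReader creates a JSON files reader and adds all provided event definition files to it.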
func newReader(files []string) (*ia.JSONFilesReader, error) {
|
||||||
|
reader := ia.NewFilesReader()
|
||||||
|
for _, file := range files {
|
||||||
|
err := reader.AddFiles(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to add files to reader: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
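// estimateCoresFd estimates the number of file descriptors required by core entities: for every entity, the number of parsed events multiplied by the number of parsed cores.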
func estimateCoresFd(entities []*CoreEventEntity) (uint64, error) {
|
||||||
|
var err error
|
||||||
|
number := uint64(0)
|
||||||
|
for _, entity := range entities {
|
||||||
|
if entity == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
events := uint64(len(entity.parsedEvents))
|
||||||
|
cores := uint64(len(entity.parsedCores))
|
||||||
|
number, err = multiplyAndAdd(events, cores, number)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return number, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
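// estimateUncoreFd estimates the number of file descriptors required by uncore entities: for every resolved event, the number of its PMU types multiplied by the number of parsed sockets.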
func estimateUncoreFd(entities []*UncoreEventEntity) (uint64, error) {
|
||||||
|
var err error
|
||||||
|
number := uint64(0)
|
||||||
|
for _, entity := range entities {
|
||||||
|
if entity == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, e := range entity.parsedEvents {
|
||||||
|
if e.custom.Event == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pmus := uint64(len(e.custom.Event.PMUTypes))
|
||||||
|
sockets := uint64(len(entity.parsedSockets))
|
||||||
|
number, err = multiplyAndAdd(pmus, sockets, number)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return number, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
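// multiplyAndAdd returns sum + factorA*factorB and reports an error if the result would overflow uint64.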
func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error) {
|
||||||
|
bigA := new(big.Int).SetUint64(factorA)
|
||||||
|
bigB := new(big.Int).SetUint64(factorB)
|
||||||
|
activeEvents := new(big.Int).Mul(bigA, bigB)
|
||||||
|
if !activeEvents.IsUint64() {
|
||||||
|
return 0, fmt.Errorf("value `%s` cannot be represented as uint64", activeEvents.String())
|
||||||
|
}
|
||||||
|
if sum > math.MaxUint64-activeEvents.Uint64() {
|
||||||
|
return 0, fmt.Errorf("value `%s` exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum)))
|
||||||
|
}
|
||||||
|
sum += activeEvents.Uint64()
|
||||||
|
return sum, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
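// readMaxFD reads the system-wide maximum number of file handles from the fileMaxPath file.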
func readMaxFD(reader fileInfoProvider) (uint64, error) {
|
||||||
|
if reader == nil {
|
||||||
|
return 0, fmt.Errorf("file reader is nil")
|
||||||
|
}
|
||||||
|
buf, err := reader.readFile(fileMaxPath)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("cannot open `%s` file: %v", fileMaxPath, err)
|
||||||
|
}
|
||||||
|
max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("cannot parse file content of `%s`: %v", fileMaxPath, err)
|
||||||
|
}
|
||||||
|
return max, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
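// checkFiles verifies that every provided path exists and points to a regular file; symlinks and non-regular files are rejected.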
func checkFiles(paths []string, fileInfo fileInfoProvider) error {
|
||||||
|
// No event definition JSON locations present
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return fmt.Errorf("no paths were given")
|
||||||
|
}
|
||||||
|
if fileInfo == nil {
|
||||||
|
return fmt.Errorf("file info provider is nil")
|
||||||
|
}
|
||||||
|
// Reject paths that do not point to existing regular files
|
||||||
|
for _, path := range paths {
|
||||||
|
lInfo, err := fileInfo.lstat(path)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("file `%s` doesn't exist", path)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err)
|
||||||
|
}
|
||||||
|
mode := lInfo.Mode()
|
||||||
|
if mode&os.ModeSymlink != 0 {
|
||||||
|
return fmt.Errorf("file %s is a symlink", path)
|
||||||
|
}
|
||||||
|
if !mode.IsRegular() {
|
||||||
|
return fmt.Errorf("file `%s` doesn't point to a reagular file", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
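// publishCoreMeasurements adds core event readings to the accumulator as pmu_metric fields (raw, enabled, running, scaled) tagged with the event name, cpu and an optional events_tag.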
func publishCoreMeasurements(metrics []coreMetric, acc telegraf.Accumulator) {
|
||||||
|
for _, m := range metrics {
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := make(map[string]string)
|
||||||
|
|
||||||
|
fields["raw"] = m.values.Raw
|
||||||
|
fields["enabled"] = m.values.Enabled
|
||||||
|
fields["running"] = m.values.Running
|
||||||
|
fields["scaled"] = m.scaled
|
||||||
|
|
||||||
|
tags["event"] = m.name
|
||||||
|
tags["cpu"] = strconv.Itoa(m.cpu)
|
||||||
|
|
||||||
|
if len(m.tag) > 0 {
|
||||||
|
tags["events_tag"] = m.tag
|
||||||
|
}
|
||||||
|
acc.AddFields("pmu_metric", fields, tags, m.time)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
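// publishUncoreMeasurements adds uncore event readings to the accumulator as pmu_metric fields tagged with the event name, socket, unit_type, the unit for non-aggregated readings and an optional events_tag.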
func publishUncoreMeasurements(metrics []uncoreMetric, acc telegraf.Accumulator) {
|
||||||
|
for _, m := range metrics {
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := make(map[string]string)
|
||||||
|
|
||||||
|
fields["raw"] = m.values.Raw
|
||||||
|
fields["enabled"] = m.values.Enabled
|
||||||
|
fields["running"] = m.values.Running
|
||||||
|
fields["scaled"] = m.scaled
|
||||||
|
|
||||||
|
tags["event"] = m.name
|
||||||
|
|
||||||
|
tags["socket"] = strconv.Itoa(m.socket)
|
||||||
|
tags["unit_type"] = m.unitType
|
||||||
|
if !m.agg {
|
||||||
|
tags["unit"] = m.unit
|
||||||
|
}
|
||||||
|
if len(m.tag) > 0 {
|
||||||
|
tags["events_tag"] = m.tag
|
||||||
|
}
|
||||||
|
acc.AddFields("pmu_metric", fields, tags, m.time)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add("intel_pmu", func() telegraf.Input {
|
||||||
|
pmu := IntelPMU{
|
||||||
|
fileInfo: &fileHelper{},
|
||||||
|
}
|
||||||
|
return &pmu
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,4 @@
|
||||||
|
//go:build !linux || !amd64
|
||||||
|
// +build !linux !amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
@ -0,0 +1,555 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestInitialization(t *testing.T) {
|
||||||
|
mError := errors.New("mock error")
|
||||||
|
mParser := &mockEntitiesParser{}
|
||||||
|
mResolver := &mockEntitiesResolver{}
|
||||||
|
mActivator := &mockEntitiesActivator{}
|
||||||
|
mFileInfo := &mockFileInfoProvider{}
|
||||||
|
|
||||||
|
file := "path/to/file"
|
||||||
|
paths := []string{file}
|
||||||
|
|
||||||
|
t.Run("missing parser, resolver or activator", func(t *testing.T) {
|
||||||
|
err := (&IntelPMU{}).initialization(mParser, nil, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil")
|
||||||
|
err = (&IntelPMU{}).initialization(nil, mResolver, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil")
|
||||||
|
err = (&IntelPMU{}).initialization(nil, nil, mActivator)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("parse entities error", func(t *testing.T) {
|
||||||
|
mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo}
|
||||||
|
|
||||||
|
mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.initialization(mParser, mResolver, mActivator)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "error during parsing configuration sections")
|
||||||
|
mParser.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("resolver error", func(t *testing.T) {
|
||||||
|
mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo}
|
||||||
|
|
||||||
|
mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.initialization(mParser, mResolver, mActivator)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "error during events resolving")
|
||||||
|
mParser.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("exceeded file descriptors", func(t *testing.T) {
|
||||||
|
limit := []byte("10")
|
||||||
|
uncoreEntities := []*UncoreEventEntity{{parsedEvents: makeEvents(10, 21), parsedSockets: makeIDs(5)}}
|
||||||
|
estimation := 1050
|
||||||
|
|
||||||
|
mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo, UncoreEntities: uncoreEntities}
|
||||||
|
|
||||||
|
mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mFileInfo.On("readFile", fileMaxPath).Return(limit, nil).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.initialization(mParser, mResolver, mActivator)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+
|
||||||
|
": consider increasing the maximum number", estimation, 10))
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
mParser.AssertExpectations(t)
|
||||||
|
mResolver.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("failed to activate entities", func(t *testing.T) {
|
||||||
|
mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo}
|
||||||
|
|
||||||
|
mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once()
|
||||||
|
mFileInfo.On("readFile", fileMaxPath).Return(nil, mError).
|
||||||
|
On("fileLimit").Return(uint64(0), mError).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.initialization(mParser, mResolver, mActivator)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "error during events activation")
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
mParser.AssertExpectations(t)
|
||||||
|
mResolver.AssertExpectations(t)
|
||||||
|
mActivator.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("everything all right", func(t *testing.T) {
|
||||||
|
mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo}
|
||||||
|
|
||||||
|
mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
mFileInfo.On("readFile", fileMaxPath).Return(nil, mError).
|
||||||
|
On("fileLimit").Return(uint64(0), mError).Once()
|
||||||
|
mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.initialization(mParser, mResolver, mActivator)
|
||||||
|
require.NoError(t, err)
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
mParser.AssertExpectations(t)
|
||||||
|
mResolver.AssertExpectations(t)
|
||||||
|
mActivator.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGather(t *testing.T) {
|
||||||
|
mEntitiesValuesReader := &mockEntitiesValuesReader{}
|
||||||
|
mAcc := &testutil.Accumulator{}
|
||||||
|
|
||||||
|
mIntelPMU := &IntelPMU{entitiesReader: mEntitiesValuesReader}
|
||||||
|
|
||||||
|
type fieldWithTags struct {
|
||||||
|
fields map[string]interface{}
|
||||||
|
tags map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("entities reader is nil", func(t *testing.T) {
|
||||||
|
err := (&IntelPMU{entitiesReader: nil}).Gather(mAcc)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entities reader is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("error while reading entities", func(t *testing.T) {
|
||||||
|
errMock := fmt.Errorf("houston we have a problem")
|
||||||
|
mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).
|
||||||
|
Return(nil, nil, errMock).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.Gather(mAcc)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("failed to read entities events values: %v", errMock))
|
||||||
|
mEntitiesValuesReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
coreMetrics []coreMetric
|
||||||
|
uncoreMetrics []uncoreMetric
|
||||||
|
results []fieldWithTags
|
||||||
|
errMsg string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "successful readings",
|
||||||
|
coreMetrics: []coreMetric{
|
||||||
|
{
|
||||||
|
values: ia.CounterValue{Raw: 100, Enabled: 200, Running: 200},
|
||||||
|
name: "CORE_EVENT_1",
|
||||||
|
tag: "DOGES",
|
||||||
|
cpu: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
values: ia.CounterValue{Raw: 2100, Enabled: 400, Running: 200},
|
||||||
|
name: "CORE_EVENT_2",
|
||||||
|
cpu: 0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
uncoreMetrics: []uncoreMetric{
|
||||||
|
{
|
||||||
|
values: ia.CounterValue{Raw: 2134562, Enabled: 1000000, Running: 1000000},
|
||||||
|
name: "UNCORE_EVENT_1",
|
||||||
|
tag: "SHIBA",
|
||||||
|
unitType: "cbox",
|
||||||
|
unit: "cbox_1",
|
||||||
|
socket: 3,
|
||||||
|
agg: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
values: ia.CounterValue{Raw: 2134562, Enabled: 3222222, Running: 2100000},
|
||||||
|
name: "UNCORE_EVENT_2",
|
||||||
|
unitType: "cbox",
|
||||||
|
socket: 0,
|
||||||
|
agg: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
results: []fieldWithTags{
|
||||||
|
{
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"raw": uint64(100),
|
||||||
|
"enabled": uint64(200),
|
||||||
|
"running": uint64(200),
|
||||||
|
"scaled": uint64(100),
|
||||||
|
},
|
||||||
|
tags: map[string]string{
|
||||||
|
"event": "CORE_EVENT_1",
|
||||||
|
"cpu": "1",
|
||||||
|
"events_tag": "DOGES",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"raw": uint64(2100),
|
||||||
|
"enabled": uint64(400),
|
||||||
|
"running": uint64(200),
|
||||||
|
"scaled": uint64(4200),
|
||||||
|
},
|
||||||
|
tags: map[string]string{
|
||||||
|
"event": "CORE_EVENT_2",
|
||||||
|
"cpu": "0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"raw": uint64(2134562),
|
||||||
|
"enabled": uint64(1000000),
|
||||||
|
"running": uint64(1000000),
|
||||||
|
"scaled": uint64(2134562),
|
||||||
|
},
|
||||||
|
tags: map[string]string{
|
||||||
|
"event": "UNCORE_EVENT_1",
|
||||||
|
"events_tag": "SHIBA",
|
||||||
|
"socket": "3",
|
||||||
|
"unit_type": "cbox",
|
||||||
|
"unit": "cbox_1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"raw": uint64(2134562),
|
||||||
|
"enabled": uint64(3222222),
|
||||||
|
"running": uint64(2100000),
|
||||||
|
"scaled": uint64(3275253),
|
||||||
|
},
|
||||||
|
tags: map[string]string{
|
||||||
|
"event": "UNCORE_EVENT_2",
|
||||||
|
"socket": "0",
|
||||||
|
"unit_type": "cbox",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "core scaled value greater then max uint64",
|
||||||
|
coreMetrics: []coreMetric{
|
||||||
|
{
|
||||||
|
values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000},
|
||||||
|
name: "I_AM_TOO_BIG",
|
||||||
|
tag: "BIG_FISH",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
errMSg: "cannot process `I_AM_TOO_BIG` scaled value `36893488147419103230`: exceeds uint64",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "uncore scaled value greater then max uint64",
|
||||||
|
uncoreMetrics: []uncoreMetric{
|
||||||
|
{
|
||||||
|
values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000},
|
||||||
|
name: "I_AM_TOO_BIG_UNCORE",
|
||||||
|
tag: "BIG_FISH",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
errMSg: "cannot process `I_AM_TOO_BIG_UNCORE` scaled value `36893488147419103230`: exceeds uint64",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).
|
||||||
|
Return(test.coreMetrics, test.uncoreMetrics, nil).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.Gather(mAcc)
|
||||||
|
|
||||||
|
mEntitiesValuesReader.AssertExpectations(t)
|
||||||
|
if len(test.errMsg) > 0 {
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), test.errMsg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
for _, result := range test.results {
|
||||||
|
mAcc.AssertContainsTaggedFields(t, "pmu_metric", result.fields, result.tags)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckFileDescriptors(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
uncores []*UncoreEventEntity
|
||||||
|
cores []*CoreEventEntity
|
||||||
|
estimation uint64
|
||||||
|
maxFD []byte
|
||||||
|
fileLimit uint64
|
||||||
|
errMsg string
|
||||||
|
}{
|
||||||
|
{"exceed maximum file descriptors number", []*UncoreEventEntity{
|
||||||
|
{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)},
|
||||||
|
{parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)},
|
||||||
|
{parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}},
|
||||||
|
[]*CoreEventEntity{
|
||||||
|
{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)},
|
||||||
|
{parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)},
|
||||||
|
{parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}},
|
||||||
|
12020, []byte("11000"), 8000, fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+
|
||||||
|
": consider increasing the maximum number", 12020, 11000),
|
||||||
|
},
|
||||||
|
{"exceed soft file limit", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, []*CoreEventEntity{
|
||||||
|
{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}},
|
||||||
|
11000, []byte("2515357"), 800, fmt.Sprintf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+
|
||||||
|
": consider increasing the limit", 11000, 800),
|
||||||
|
},
|
||||||
|
{"no exceeds", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}},
|
||||||
|
[]*CoreEventEntity{{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}},
|
||||||
|
11000, []byte("2515357"), 13000, "",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
mFileInfo := &mockFileInfoProvider{}
|
||||||
|
mIntelPMU := IntelPMU{
|
||||||
|
CoreEntities: test.cores,
|
||||||
|
UncoreEntities: test.uncores,
|
||||||
|
fileInfo: mFileInfo,
|
||||||
|
Log: testutil.Logger{},
|
||||||
|
}
|
||||||
|
mFileInfo.On("readFile", fileMaxPath).Return(test.maxFD, nil).
|
||||||
|
On("fileLimit").Return(test.fileLimit, nil).Once()
|
||||||
|
|
||||||
|
err := mIntelPMU.checkFileDescriptors()
|
||||||
|
if len(test.errMsg) > 0 {
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), test.errMsg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEstimateUncoreFd(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
entities []*UncoreEventEntity
|
||||||
|
result uint64
|
||||||
|
}{
|
||||||
|
{"nil entities", nil, 0},
|
||||||
|
{"nil perf event", []*UncoreEventEntity{{parsedEvents: []*eventWithQuals{{"", nil, ia.CustomizableEvent{}}}, parsedSockets: makeIDs(0)}}, 0},
|
||||||
|
{"one uncore entity", []*UncoreEventEntity{{parsedEvents: makeEvents(10, 10), parsedSockets: makeIDs(20)}}, 2000},
|
||||||
|
{"nil entity", []*UncoreEventEntity{nil, {parsedEvents: makeEvents(1, 8), parsedSockets: makeIDs(1)}}, 8},
|
||||||
|
{"many core entities", []*UncoreEventEntity{
|
||||||
|
{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)},
|
||||||
|
{parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)},
|
||||||
|
{parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)},
|
||||||
|
}, 11305},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
mIntelPMU := IntelPMU{UncoreEntities: test.entities}
|
||||||
|
result, err := estimateUncoreFd(mIntelPMU.UncoreEntities)
|
||||||
|
require.Equal(t, test.result, result)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEstimateCoresFd(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
entities []*CoreEventEntity
|
||||||
|
result uint64
|
||||||
|
}{
|
||||||
|
{"nil entities", nil, 0},
|
||||||
|
{"one core entity", []*CoreEventEntity{{parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200},
|
||||||
|
{"nil entity", []*CoreEventEntity{nil, {parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200},
|
||||||
|
{"many core entities", []*CoreEventEntity{
|
||||||
|
{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)},
|
||||||
|
{parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)},
|
||||||
|
{parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)},
|
||||||
|
}, 715},
|
||||||
|
{"1024 events", []*CoreEventEntity{{parsedEvents: makeEvents(1024, 1), parsedCores: makeIDs(12)}}, 12288},
|
||||||
|
{"big number", []*CoreEventEntity{{parsedEvents: makeEvents(1048576, 1), parsedCores: makeIDs(1024)}}, 1073741824},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
mIntelPMU := IntelPMU{CoreEntities: test.entities}
|
||||||
|
result, err := estimateCoresFd(mIntelPMU.CoreEntities)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, test.result, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeEvents(number int, pmusNumber int) []*eventWithQuals {
|
||||||
|
a := make([]*eventWithQuals, number)
|
||||||
|
for i := range a {
|
||||||
|
b := make([]ia.NamedPMUType, pmusNumber)
|
||||||
|
for j := range b {
|
||||||
|
b[j] = ia.NamedPMUType{}
|
||||||
|
}
|
||||||
|
a[i] = &eventWithQuals{fmt.Sprintf("EVENT.%d", i), nil,
|
||||||
|
ia.CustomizableEvent{Event: &ia.PerfEvent{PMUTypes: b}},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeIDs(number int) []int {
|
||||||
|
a := make([]int, number)
|
||||||
|
for i := range a {
|
||||||
|
a[i] = i
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadMaxFD(t *testing.T) {
|
||||||
|
mFileReader := &mockFileInfoProvider{}
|
||||||
|
|
||||||
|
t.Run("reader is nil", func(t *testing.T) {
|
||||||
|
result, err := readMaxFD(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "file reader is nil")
|
||||||
|
require.Zero(t, result)
|
||||||
|
})
|
||||||
|
|
||||||
|
openErrorMsg := fmt.Sprintf("cannot open `%s` file", fileMaxPath)
|
||||||
|
parseErrorMsg := fmt.Sprintf("cannot parse file content of `%s`", fileMaxPath)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
content []byte
|
||||||
|
maxFD uint64
|
||||||
|
failMsg string
|
||||||
|
}{
|
||||||
|
{"read file error", fmt.Errorf("mock error"), nil, 0, openErrorMsg},
|
||||||
|
{"file content parse error", nil, []byte("wrong format"), 0, parseErrorMsg},
|
||||||
|
{"negative value reading", nil, []byte("-10000"), 0, parseErrorMsg},
|
||||||
|
{"max uint exceeded", nil, []byte("18446744073709551616"), 0, parseErrorMsg},
|
||||||
|
{"reading succeeded", nil, []byte("12343122"), 12343122, ""},
|
||||||
|
{"min value reading", nil, []byte("0"), 0, ""},
|
||||||
|
{"max uint 64 reading", nil, []byte("18446744073709551615"), math.MaxUint64, ""},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
mFileReader.On("readFile", fileMaxPath).Return(test.content, test.err).Once()
|
||||||
|
result, err := readMaxFD(mFileReader)
|
||||||
|
|
||||||
|
if len(test.failMsg) > 0 {
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), test.failMsg)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
require.Equal(t, test.maxFD, result)
|
||||||
|
mFileReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddFiles(t *testing.T) {
|
||||||
|
mFileInfo := &mockFileInfoProvider{}
|
||||||
|
mError := errors.New("mock error")
|
||||||
|
|
||||||
|
t.Run("no paths", func(t *testing.T) {
|
||||||
|
err := checkFiles([]string{}, mFileInfo)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "no paths were given")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no file info provider", func(t *testing.T) {
|
||||||
|
err := checkFiles([]string{"path/1, path/2"}, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "file info provider is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("stat error", func(t *testing.T) {
|
||||||
|
file := "path/to/file"
|
||||||
|
paths := []string{file}
|
||||||
|
mFileInfo.On("lstat", file).Return(nil, mError).Once()
|
||||||
|
|
||||||
|
err := checkFiles(paths, mFileInfo)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of `%s`", file))
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("file does not exist", func(t *testing.T) {
|
||||||
|
file := "path/to/file"
|
||||||
|
paths := []string{file}
|
||||||
|
mFileInfo.On("lstat", file).Return(nil, os.ErrNotExist).Once()
|
||||||
|
|
||||||
|
err := checkFiles(paths, mFileInfo)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't exist", file))
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("file is symlink", func(t *testing.T) {
|
||||||
|
file := "path/to/symlink"
|
||||||
|
paths := []string{file}
|
||||||
|
fileInfo := fakeFileInfo{fileMode: os.ModeSymlink}
|
||||||
|
mFileInfo.On("lstat", file).Return(fileInfo, nil).Once()
|
||||||
|
|
||||||
|
err := checkFiles(paths, mFileInfo)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("file %s is a symlink", file))
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("file doesn't point to a regular file", func(t *testing.T) {
|
||||||
|
file := "path/to/file"
|
||||||
|
paths := []string{file}
|
||||||
|
fileInfo := fakeFileInfo{fileMode: os.ModeDir}
|
||||||
|
mFileInfo.On("lstat", file).Return(fileInfo, nil).Once()
|
||||||
|
|
||||||
|
err := checkFiles(paths, mFileInfo)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't point to a reagular file", file))
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("checking succeeded", func(t *testing.T) {
|
||||||
|
paths := []string{"path/to/file1", "path/to/file2", "path/to/file3"}
|
||||||
|
fileInfo := fakeFileInfo{}
|
||||||
|
|
||||||
|
for _, file := range paths {
|
||||||
|
mFileInfo.On("lstat", file).Return(fileInfo, nil).Once()
|
||||||
|
}
|
||||||
|
|
||||||
|
err := checkFiles(paths, mFileInfo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
mFileInfo.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type fakeFileInfo struct {
|
||||||
|
fileMode os.FileMode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f fakeFileInfo) Name() string { return "" }
|
||||||
|
func (f fakeFileInfo) Size() int64 { return 0 }
|
||||||
|
func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode }
|
||||||
|
func (f fakeFileInfo) ModTime() time.Time { return time.Time{} }
|
||||||
|
func (f fakeFileInfo) IsDir() bool { return false }
|
||||||
|
func (f fakeFileInfo) Sys() interface{} { return nil }
|
||||||
|
|
@ -0,0 +1,407 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/intel/iaevents"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mockValuesReader is an autogenerated mock type for the valuesReader type
|
||||||
|
type mockValuesReader struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// readValue provides a mock function with given fields: event
|
||||||
|
func (_m *mockValuesReader) readValue(event *iaevents.ActiveEvent) (iaevents.CounterValue, error) {
|
||||||
|
ret := _m.Called(event)
|
||||||
|
|
||||||
|
var r0 iaevents.CounterValue
|
||||||
|
if rf, ok := ret.Get(0).(func(*iaevents.ActiveEvent) iaevents.CounterValue); ok {
|
||||||
|
r0 = rf(event)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Get(0).(iaevents.CounterValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(*iaevents.ActiveEvent) error); ok {
|
||||||
|
r1 = rf(event)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockEntitiesValuesReader is an autogenerated mock type for the entitiesValuesReader type
|
||||||
|
type mockEntitiesValuesReader struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEntities provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *mockEntitiesValuesReader) readEntities(_a0 []*CoreEventEntity, _a1 []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 []coreMetric
|
||||||
|
if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) []coreMetric); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]coreMetric)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 []uncoreMetric
|
||||||
|
if rf, ok := ret.Get(1).(func([]*CoreEventEntity, []*UncoreEventEntity) []uncoreMetric); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(1) != nil {
|
||||||
|
r1 = ret.Get(1).([]uncoreMetric)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r2 error
|
||||||
|
if rf, ok := ret.Get(2).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok {
|
||||||
|
r2 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r2 = ret.Error(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1, r2
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockEntitiesActivator is an autogenerated mock type for the entitiesActivator type
|
||||||
|
type mockEntitiesActivator struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// activateEntities provides a mock function with given fields: coreEntities, uncoreEntities
|
||||||
|
func (_m *mockEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
|
||||||
|
ret := _m.Called(coreEntities, uncoreEntities)
|
||||||
|
|
||||||
|
var r0 error
|
||||||
|
if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok {
|
||||||
|
r0 = rf(coreEntities, uncoreEntities)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockEntitiesParser is an autogenerated mock type for the entitiesParser type
|
||||||
|
type mockEntitiesParser struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseEntities provides a mock function with given fields: coreEntities, uncoreEntities
|
||||||
|
func (_m *mockEntitiesParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
|
||||||
|
ret := _m.Called(coreEntities, uncoreEntities)
|
||||||
|
|
||||||
|
var r0 error
|
||||||
|
if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok {
|
||||||
|
r0 = rf(coreEntities, uncoreEntities)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockEntitiesResolver is an autogenerated mock type for the entitiesResolver type
|
||||||
|
type mockEntitiesResolver struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveEntities provides a mock function with given fields: coreEntities, uncoreEntities
|
||||||
|
func (_m *mockEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
|
||||||
|
ret := _m.Called(coreEntities, uncoreEntities)
|
||||||
|
|
||||||
|
var r0 error
|
||||||
|
if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok {
|
||||||
|
r0 = rf(coreEntities, uncoreEntities)
|
||||||
|
} else {
|
||||||
|
r0 = ret.Error(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockEventsActivator is an autogenerated mock type for the eventsActivator type
|
||||||
|
type mockEventsActivator struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// activateEvent provides a mock function with given fields: _a0, _a1, _a2
|
||||||
|
func (_m *mockEventsActivator) activateEvent(_a0 iaevents.Activator, _a1 iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveEvent, error) {
|
||||||
|
ret := _m.Called(_a0, _a1, _a2)
|
||||||
|
|
||||||
|
var r0 *iaevents.ActiveEvent
|
||||||
|
if rf, ok := ret.Get(0).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveEvent); ok {
|
||||||
|
r0 = rf(_a0, _a1, _a2)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*iaevents.ActiveEvent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) error); ok {
|
||||||
|
r1 = rf(_a0, _a1, _a2)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// activateGroup provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *mockEventsActivator) activateGroup(_a0 iaevents.PlacementProvider, _a1 []iaevents.CustomizableEvent) (*iaevents.ActiveEventGroup, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *iaevents.ActiveEventGroup
|
||||||
|
if rf, ok := ret.Get(0).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) *iaevents.ActiveEventGroup); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*iaevents.ActiveEventGroup)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// activateMulti provides a mock function with given fields: _a0, _a1, _a2
|
||||||
|
func (_m *mockEventsActivator) activateMulti(_a0 iaevents.MultiActivator, _a1 []iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveMultiEvent, error) {
|
||||||
|
ret := _m.Called(_a0, _a1, _a2)
|
||||||
|
|
||||||
|
var r0 *iaevents.ActiveMultiEvent
|
||||||
|
if rf, ok := ret.Get(0).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveMultiEvent); ok {
|
||||||
|
r0 = rf(_a0, _a1, _a2)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*iaevents.ActiveMultiEvent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) error); ok {
|
||||||
|
r1 = rf(_a0, _a1, _a2)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockFileInfoProvider is an autogenerated mock type for the fileInfoProvider type
|
||||||
|
type mockFileInfoProvider struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// fileLimit provides a mock function with given fields:
|
||||||
|
func (_m *mockFileInfoProvider) fileLimit() (uint64, error) {
|
||||||
|
ret := _m.Called()
|
||||||
|
|
||||||
|
var r0 uint64
|
||||||
|
if rf, ok := ret.Get(0).(func() uint64); ok {
|
||||||
|
r0 = rf()
|
||||||
|
} else {
|
||||||
|
r0 = ret.Get(0).(uint64)
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func() error); ok {
|
||||||
|
r1 = rf()
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// readFile provides a mock function with given fields: _a0
|
||||||
|
func (_m *mockFileInfoProvider) readFile(_a0 string) ([]byte, error) {
|
||||||
|
ret := _m.Called(_a0)
|
||||||
|
|
||||||
|
var r0 []byte
|
||||||
|
if rf, ok := ret.Get(0).(func(string) []byte); ok {
|
||||||
|
r0 = rf(_a0)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]byte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||||
|
r1 = rf(_a0)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// lstat provides a mock function with given fields: _a0
|
||||||
|
func (_m *mockFileInfoProvider) lstat(_a0 string) (os.FileInfo, error) {
|
||||||
|
ret := _m.Called(_a0)
|
||||||
|
|
||||||
|
var r0 os.FileInfo
|
||||||
|
if rf, ok := ret.Get(0).(func(string) os.FileInfo); ok {
|
||||||
|
r0 = rf(_a0)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(os.FileInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||||
|
r1 = rf(_a0)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockPlacementMaker is an autogenerated mock type for the placementMaker type
|
||||||
|
type mockPlacementMaker struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeCorePlacements provides a mock function with given fields: cores, factory
|
||||||
|
func (_m *mockPlacementMaker) makeCorePlacements(cores []int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) {
|
||||||
|
ret := _m.Called(cores, factory)
|
||||||
|
|
||||||
|
var r0 []iaevents.PlacementProvider
|
||||||
|
if rf, ok := ret.Get(0).(func([]int, iaevents.PlacementFactory) []iaevents.PlacementProvider); ok {
|
||||||
|
r0 = rf(cores, factory)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]iaevents.PlacementProvider)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func([]int, iaevents.PlacementFactory) error); ok {
|
||||||
|
r1 = rf(cores, factory)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeUncorePlacements provides a mock function with given fields: factory, socket
|
||||||
|
func (_m *mockPlacementMaker) makeUncorePlacements(socket int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) {
|
||||||
|
ret := _m.Called(factory, socket)
|
||||||
|
|
||||||
|
var r0 []iaevents.PlacementProvider
|
||||||
|
if rf, ok := ret.Get(0).(func(iaevents.PlacementFactory, int) []iaevents.PlacementProvider); ok {
|
||||||
|
r0 = rf(factory, socket)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]iaevents.PlacementProvider)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(iaevents.PlacementFactory, int) error); ok {
|
||||||
|
r1 = rf(factory, socket)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockSysInfoProvider is an autogenerated mock type for the sysInfoProvider type
|
||||||
|
type mockSysInfoProvider struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// allCPUs provides a mock function with given fields:
|
||||||
|
func (_m *mockSysInfoProvider) allCPUs() ([]int, error) {
|
||||||
|
ret := _m.Called()
|
||||||
|
|
||||||
|
var r0 []int
|
||||||
|
if rf, ok := ret.Get(0).(func() []int); ok {
|
||||||
|
r0 = rf()
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]int)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func() error); ok {
|
||||||
|
r1 = rf()
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// allSockets provides a mock function with given fields:
|
||||||
|
func (_m *mockSysInfoProvider) allSockets() ([]int, error) {
|
||||||
|
ret := _m.Called()
|
||||||
|
|
||||||
|
var r0 []int
|
||||||
|
if rf, ok := ret.Get(0).(func() []int); ok {
|
||||||
|
r0 = rf()
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]int)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func() error); ok {
|
||||||
|
r1 = rf()
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockTransformer is an autogenerated mock type for the Transformer type
|
||||||
|
type MockTransformer struct {
|
||||||
|
mock.Mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform provides a mock function with given fields: reader, matcher
|
||||||
|
func (_m *MockTransformer) Transform(reader iaevents.Reader, matcher iaevents.Matcher) ([]*iaevents.PerfEvent, error) {
|
||||||
|
ret := _m.Called(reader, matcher)
|
||||||
|
|
||||||
|
var r0 []*iaevents.PerfEvent
|
||||||
|
if rf, ok := ret.Get(0).(func(iaevents.Reader, iaevents.Matcher) []*iaevents.PerfEvent); ok {
|
||||||
|
r0 = rf(reader, matcher)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).([]*iaevents.PerfEvent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(iaevents.Reader, iaevents.Matcher) error); ok {
|
||||||
|
r1 = rf(reader, matcher)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
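These mocks follow the usual testify lifecycle: a test registers expectations with On/Return, the code under test calls the mocked method, and AssertExpectations fails the test if any registered call never happened. A minimal sketch of that flow, assuming it lives in a test file of this package (the event and the stubbed value are illustrative; the real test files below use the same pattern):

func TestReadValueSketch(t *testing.T) {
	mReader := &mockValuesReader{}
	event := &iaevents.ActiveEvent{PerfEvent: &iaevents.PerfEvent{Name: "event1"}}

	// Expect exactly one readValue call for this event and stub its result.
	mReader.On("readValue", event).Return(iaevents.CounterValue{Raw: 100}, nil).Once()

	value, err := mReader.readValue(event)
	if err != nil || value.Raw != 100 {
		t.Fatalf("unexpected result: %+v, %v", value, err)
	}

	// Verifies that every expectation registered above was satisfied.
	mReader.AssertExpectations(t)
}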
|
||||||
|
|
@@ -0,0 +1,249 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
type coreMetric struct {
|
||||||
|
values ia.CounterValue
|
||||||
|
scaled uint64
|
||||||
|
|
||||||
|
name string
|
||||||
|
tag string
|
||||||
|
cpu int
|
||||||
|
|
||||||
|
time time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type uncoreMetric struct {
|
||||||
|
values ia.CounterValue
|
||||||
|
scaled uint64
|
||||||
|
|
||||||
|
name string
|
||||||
|
unitType string
|
||||||
|
unit string
|
||||||
|
tag string
|
||||||
|
socket int
|
||||||
|
|
||||||
|
agg bool
|
||||||
|
|
||||||
|
time time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type valuesReader interface {
|
||||||
|
readValue(event *ia.ActiveEvent) (ia.CounterValue, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type iaValuesReader struct{}
|
||||||
|
|
||||||
|
func (iaValuesReader) readValue(event *ia.ActiveEvent) (ia.CounterValue, error) {
|
||||||
|
return event.ReadValue()
|
||||||
|
}
|
||||||
|
|
||||||
|
type entitiesValuesReader interface {
|
||||||
|
readEntities([]*CoreEventEntity, []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type iaEntitiesValuesReader struct {
|
||||||
|
eventReader valuesReader
|
||||||
|
timer clock
|
||||||
|
}
|
||||||
|
|
||||||
|
type clock interface {
|
||||||
|
now() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type realClock struct{}
|
||||||
|
|
||||||
|
func (realClock) now() time.Time {
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ie *iaEntitiesValuesReader) readEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) {
|
||||||
|
var coreMetrics []coreMetric
|
||||||
|
var uncoreMetrics []uncoreMetric
|
||||||
|
|
||||||
|
for _, entity := range coreEntities {
|
||||||
|
newMetrics, err := ie.readCoreEvents(entity)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
coreMetrics = append(coreMetrics, newMetrics...)
|
||||||
|
}
|
||||||
|
for _, entity := range uncoreEntities {
|
||||||
|
newMetrics, err := ie.readUncoreEvents(entity)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
uncoreMetrics = append(uncoreMetrics, newMetrics...)
|
||||||
|
}
|
||||||
|
return coreMetrics, uncoreMetrics, nil
|
||||||
|
}
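The eventReader and timer fields are deliberately narrow interfaces so the tests can substitute mockValuesReader and a fixed clock; in the plugin itself the concrete iaValuesReader and realClock are presumably what get plugged in. A minimal sketch of that wiring, assuming the entities have already been parsed, resolved and activated elsewhere in the plugin (the helper name readAllEntities is illustrative):

// Sketch only: assumes the surrounding intel_pmu package; the entities are an
// input from the rest of the plugin, not built here.
func readAllEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) {
	reader := &iaEntitiesValuesReader{
		eventReader: iaValuesReader{}, // delegates to ActiveEvent.ReadValue
		timer:       realClock{},      // stamps every metric with time.Now()
	}
	return reader.readEntities(coreEntities, uncoreEntities)
}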
|
||||||
|
|
||||||
|
func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]coreMetric, error) {
|
||||||
|
if ie.eventReader == nil || ie.timer == nil {
|
||||||
|
return nil, fmt.Errorf("event values reader or timer is nil")
|
||||||
|
}
|
||||||
|
if entity == nil {
|
||||||
|
return nil, fmt.Errorf("entity is nil")
|
||||||
|
}
|
||||||
|
metrics := make([]coreMetric, len(entity.activeEvents))
|
||||||
|
errGroup := errgroup.Group{}
|
||||||
|
|
||||||
|
for i, event := range entity.activeEvents {
|
||||||
|
id := i
|
||||||
|
actualEvent := event
|
||||||
|
|
||||||
|
if event == nil || event.PerfEvent == nil {
|
||||||
|
return nil, fmt.Errorf("active event or corresponding perf event is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
errGroup.Go(func() error {
|
||||||
|
values, err := ie.eventReader.readValue(actualEvent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read core event `%s` values: %v", actualEvent, err)
|
||||||
|
}
|
||||||
|
cpu, _ := actualEvent.PMUPlacement()
|
||||||
|
newMetric := coreMetric{
|
||||||
|
values: values,
|
||||||
|
tag: entity.EventsTag,
|
||||||
|
cpu: cpu,
|
||||||
|
name: actualEvent.PerfEvent.Name,
|
||||||
|
time: ie.timer.now(),
|
||||||
|
}
|
||||||
|
metrics[id] = newMetric
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
err := errGroup.Wait()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return metrics, nil
|
||||||
|
}
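The id := i and actualEvent := event copies above are not redundant: before Go 1.22, all closures launched from one range loop share the same loop variables, so goroutines started through errgroup could otherwise all observe the values of the last iteration. A small self-contained illustration of the pattern:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	events := []string{"event1", "event2", "event3"}
	results := make([]string, len(events))

	group := errgroup.Group{}
	for i, event := range events {
		id := i              // per-iteration copy of the index
		actualEvent := event // per-iteration copy of the value
		group.Go(func() error {
			// Without the copies, pre-Go 1.22 every goroutine could see
			// the index and value of the final iteration.
			results[id] = actualEvent
			return nil
		})
	}
	if err := group.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(results) // [event1 event2 event3]
}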
|
||||||
|
|
||||||
|
func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([]uncoreMetric, error) {
|
||||||
|
if entity == nil {
|
||||||
|
return nil, fmt.Errorf("entity is nil")
|
||||||
|
}
|
||||||
|
var uncoreMetrics []uncoreMetric
|
||||||
|
|
||||||
|
for _, event := range entity.activeMultiEvents {
|
||||||
|
if entity.Aggregate {
|
||||||
|
newMetric, err := ie.readMultiEventAgg(event)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
newMetric.tag = entity.EventsTag
|
||||||
|
uncoreMetrics = append(uncoreMetrics, newMetric)
|
||||||
|
} else {
|
||||||
|
newMetrics, err := ie.readMultiEventSeparately(event)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for i := range newMetrics {
|
||||||
|
newMetrics[i].tag = entity.EventsTag
|
||||||
|
}
|
||||||
|
uncoreMetrics = append(uncoreMetrics, newMetrics...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return uncoreMetrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent) ([]uncoreMetric, error) {
|
||||||
|
if ie.eventReader == nil || ie.timer == nil {
|
||||||
|
return nil, fmt.Errorf("event values reader or timer is nil")
|
||||||
|
}
|
||||||
|
if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil {
|
||||||
|
return nil, fmt.Errorf("no active events or perf event is nil")
|
||||||
|
}
|
||||||
|
activeEvents := multiEvent.activeEvents
|
||||||
|
perfEvent := multiEvent.perfEvent
|
||||||
|
|
||||||
|
metrics := make([]uncoreMetric, len(activeEvents))
|
||||||
|
group := errgroup.Group{}
|
||||||
|
|
||||||
|
for i, event := range activeEvents {
|
||||||
|
id := i
|
||||||
|
actualEvent := event
|
||||||
|
|
||||||
|
group.Go(func() error {
|
||||||
|
values, err := ie.eventReader.readValue(actualEvent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err)
|
||||||
|
}
|
||||||
|
newMetric := uncoreMetric{
|
||||||
|
values: values,
|
||||||
|
socket: multiEvent.socket,
|
||||||
|
unitType: perfEvent.PMUName,
|
||||||
|
name: perfEvent.Name,
|
||||||
|
unit: actualEvent.PMUName(),
|
||||||
|
time: ie.timer.now(),
|
||||||
|
}
|
||||||
|
metrics[id] = newMetric
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
err := group.Wait()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return metrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (uncoreMetric, error) {
|
||||||
|
if ie.eventReader == nil || ie.timer == nil {
|
||||||
|
return uncoreMetric{}, fmt.Errorf("event values reader or timer is nil")
|
||||||
|
}
|
||||||
|
if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil {
|
||||||
|
return uncoreMetric{}, fmt.Errorf("no active events or perf event is nil")
|
||||||
|
}
|
||||||
|
activeEvents := multiEvent.activeEvents
|
||||||
|
perfEvent := multiEvent.perfEvent
|
||||||
|
|
||||||
|
values := make([]ia.CounterValue, len(activeEvents))
|
||||||
|
group := errgroup.Group{}
|
||||||
|
|
||||||
|
for i, event := range activeEvents {
|
||||||
|
id := i
|
||||||
|
actualEvent := event
|
||||||
|
|
||||||
|
group.Go(func() error {
|
||||||
|
value, err := ie.eventReader.readValue(actualEvent)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err)
|
||||||
|
}
|
||||||
|
values[id] = value
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
err := group.Wait()
|
||||||
|
if err != nil {
|
||||||
|
return uncoreMetric{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bRaw, bEnabled, bRunning := ia.AggregateValues(values)
|
||||||
|
if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() {
|
||||||
|
return uncoreMetric{}, fmt.Errorf("cannot aggregate `%s` values, uint64 exceeded", perfEvent)
|
||||||
|
}
|
||||||
|
aggValues := ia.CounterValue{
|
||||||
|
Raw: bRaw.Uint64(),
|
||||||
|
Enabled: bEnabled.Uint64(),
|
||||||
|
Running: bRunning.Uint64(),
|
||||||
|
}
|
||||||
|
newMetric := uncoreMetric{
|
||||||
|
values: aggValues,
|
||||||
|
socket: multiEvent.socket,
|
||||||
|
unitType: perfEvent.PMUName,
|
||||||
|
name: perfEvent.Name,
|
||||||
|
time: ie.timer.now(),
|
||||||
|
}
|
||||||
|
return newMetric, nil
|
||||||
|
}
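ia.AggregateValues sums the per-unit counters as arbitrary-precision integers, which is why the IsUint64 checks above guard the conversion back to uint64 before the aggregated metric is built. A self-contained sketch of the same guard using math/big; the input mirrors the "too big numbers" case in the tests below:

package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	raws := []uint64{math.MaxUint64, 1} // the sum no longer fits in uint64

	sum := new(big.Int)
	for _, r := range raws {
		sum.Add(sum, new(big.Int).SetUint64(r))
	}

	if !sum.IsUint64() {
		fmt.Println("cannot aggregate values, uint64 exceeded")
		return
	}
	fmt.Println("aggregated raw value:", sum.Uint64())
}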
|
||||||
|
|
@@ -0,0 +1,522 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type moonClock struct{}
|
||||||
|
|
||||||
|
func (moonClock) now() time.Time {
|
||||||
|
return time.Date(1969, 7, 20, 20, 17, 0, 0, time.UTC)
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventWithValues struct {
|
||||||
|
activeEvent *ia.ActiveEvent
|
||||||
|
values ia.CounterValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadCoreEvents(t *testing.T) {
|
||||||
|
mReader := &mockValuesReader{}
|
||||||
|
mTimer := &moonClock{}
|
||||||
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
|
|
||||||
|
t.Run("event reader is nil", func(t *testing.T) {
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readCoreEvents(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "event values reader or timer is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("timer is nil", func(t *testing.T) {
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readCoreEvents(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "event values reader or timer is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("entity is nil", func(t *testing.T) {
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: moonClock{}}).readCoreEvents(nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entity is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("nil events", func(t *testing.T) {
|
||||||
|
entity := &CoreEventEntity{}
|
||||||
|
|
||||||
|
entity.activeEvents = append(entity.activeEvents, nil)
|
||||||
|
metrics, err := mEntitiesReader.readCoreEvents(entity)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "active event or corresponding perf event is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("reading failed", func(t *testing.T) {
|
||||||
|
errMock := fmt.Errorf("mock error")
|
||||||
|
event := &ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}
|
||||||
|
|
||||||
|
entity := &CoreEventEntity{}
|
||||||
|
|
||||||
|
entity.activeEvents = append(entity.activeEvents, event)
|
||||||
|
mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once()
|
||||||
|
|
||||||
|
metrics, err := mEntitiesReader.readCoreEvents(entity)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event `%s` values: %v", event, errMock))
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("read active events values", func(t *testing.T) {
|
||||||
|
entity := &CoreEventEntity{}
|
||||||
|
var expected []coreMetric
|
||||||
|
|
||||||
|
tEvents := []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event2"}}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event3"}}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tEvents {
|
||||||
|
entity.activeEvents = append(entity.activeEvents, tc.activeEvent)
|
||||||
|
cpu, _ := tc.activeEvent.PMUPlacement()
|
||||||
|
newMetric := coreMetric{
|
||||||
|
values: tc.values,
|
||||||
|
tag: entity.EventsTag,
|
||||||
|
cpu: cpu,
|
||||||
|
name: tc.activeEvent.PerfEvent.Name,
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
expected = append(expected, newMetric)
|
||||||
|
mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once()
|
||||||
|
}
|
||||||
|
metrics, err := mEntitiesReader.readCoreEvents(entity)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expected, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadMultiEventSeparately(t *testing.T) {
|
||||||
|
mReader := &mockValuesReader{}
|
||||||
|
mTimer := &moonClock{}
|
||||||
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
|
|
||||||
|
t.Run("event reader is nil", func(t *testing.T) {
|
||||||
|
event := multiEvent{}
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventSeparately(event)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "event values reader or timer is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("timer is nil", func(t *testing.T) {
|
||||||
|
event := multiEvent{}
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventSeparately(event)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "event values reader or timer is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("multi event is nil", func(t *testing.T) {
|
||||||
|
event := multiEvent{}
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{&iaValuesReader{}, moonClock{}}).readMultiEventSeparately(event)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "no active events or perf event is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("reading failed", func(t *testing.T) {
|
||||||
|
errMock := fmt.Errorf("mock error")
|
||||||
|
perfEvent := &ia.PerfEvent{Name: "event"}
|
||||||
|
|
||||||
|
event := &ia.ActiveEvent{PerfEvent: perfEvent}
|
||||||
|
multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}}
|
||||||
|
|
||||||
|
mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once()
|
||||||
|
|
||||||
|
metrics, err := mEntitiesReader.readMultiEventSeparately(multi)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event `%s` values: %v", event, errMock))
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("read active events values", func(t *testing.T) {
|
||||||
|
perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"}
|
||||||
|
multi := multiEvent{perfEvent: perfEvent}
|
||||||
|
var expected []uncoreMetric
|
||||||
|
|
||||||
|
tEvents := []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tEvents {
|
||||||
|
multi.activeEvents = append(multi.activeEvents, tc.activeEvent)
|
||||||
|
newMetric := uncoreMetric{
|
||||||
|
values: tc.values,
|
||||||
|
socket: multi.socket,
|
||||||
|
unitType: multi.perfEvent.PMUName,
|
||||||
|
name: multi.perfEvent.Name,
|
||||||
|
unit: tc.activeEvent.PMUName(),
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
expected = append(expected, newMetric)
|
||||||
|
mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once()
|
||||||
|
}
|
||||||
|
metrics, err := mEntitiesReader.readMultiEventSeparately(multi)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expected, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadMultiEventAgg(t *testing.T) {
|
||||||
|
mReader := &mockValuesReader{}
|
||||||
|
mTimer := &moonClock{}
|
||||||
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
|
errMock := fmt.Errorf("mock error")
|
||||||
|
|
||||||
|
t.Run("event reader is nil", func(t *testing.T) {
|
||||||
|
event := multiEvent{}
|
||||||
|
_, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventAgg(event)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "event values reader or timer is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("timer is nil", func(t *testing.T) {
|
||||||
|
event := multiEvent{}
|
||||||
|
_, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventAgg(event)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "event values reader or timer is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
multi multiEvent
|
||||||
|
events []eventWithValues
|
||||||
|
result ia.CounterValue
|
||||||
|
readFail bool
|
||||||
|
errMsg string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no events",
|
||||||
|
multi: multiEvent{perfEvent: perfEvent},
|
||||||
|
events: nil,
|
||||||
|
result: ia.CounterValue{},
|
||||||
|
errMsg: "no active events or perf event is nil",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no perf event",
|
||||||
|
multi: multiEvent{perfEvent: nil, activeEvents: []*ia.ActiveEvent{{}, {}}},
|
||||||
|
events: nil,
|
||||||
|
result: ia.CounterValue{},
|
||||||
|
errMsg: "no active events or perf event is nil",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "successful reading and aggregation",
|
||||||
|
multi: multiEvent{perfEvent: perfEvent},
|
||||||
|
events: []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 5123, Enabled: 1231242, Running: 41123}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4500, Enabled: 1823423, Running: 182343}},
|
||||||
|
},
|
||||||
|
result: ia.CounterValue{Raw: 9623, Enabled: 3054665, Running: 223466},
|
||||||
|
errMsg: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:     "too big numbers",
|
||||||
|
multi: multiEvent{perfEvent: perfEvent},
|
||||||
|
events: []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: math.MaxUint64, Enabled: 0, Running: 0}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}},
|
||||||
|
},
|
||||||
|
result: ia.CounterValue{},
|
||||||
|
errMsg:   fmt.Sprintf("cannot aggregate `%s` values, uint64 exceeded", perfEvent),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "reading fail",
|
||||||
|
multi: multiEvent{perfEvent: perfEvent},
|
||||||
|
events: []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 0, Enabled: 0, Running: 0}},
|
||||||
|
},
|
||||||
|
readFail: true,
|
||||||
|
result: ia.CounterValue{},
|
||||||
|
errMsg: "failed to read uncore event",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
for _, eventWithValue := range test.events {
|
||||||
|
test.multi.activeEvents = append(test.multi.activeEvents, eventWithValue.activeEvent)
|
||||||
|
if test.readFail {
|
||||||
|
mReader.On("readValue", eventWithValue.activeEvent).Return(ia.CounterValue{}, errMock).Once()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mReader.On("readValue", eventWithValue.activeEvent).Return(eventWithValue.values, nil).Once()
|
||||||
|
}
|
||||||
|
metric, err := mEntitiesReader.readMultiEventAgg(test.multi)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
|
||||||
|
if len(test.errMsg) > 0 {
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), test.errMsg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
expected := uncoreMetric{
|
||||||
|
values: test.result,
|
||||||
|
socket: test.multi.socket,
|
||||||
|
unitType: test.multi.perfEvent.PMUName,
|
||||||
|
name: test.multi.perfEvent.Name,
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expected, metric)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadUncoreEvents(t *testing.T) {
|
||||||
|
errMock := fmt.Errorf("mock error")
|
||||||
|
|
||||||
|
t.Run("entity is nil", func(t *testing.T) {
|
||||||
|
metrics, err := (&iaEntitiesValuesReader{}).readUncoreEvents(nil)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entity is nil")
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("read aggregated entities", func(t *testing.T) {
|
||||||
|
mReader := &mockValuesReader{}
|
||||||
|
mTimer := &moonClock{}
|
||||||
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
|
|
||||||
|
perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}}
|
||||||
|
perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}}
|
||||||
|
|
||||||
|
multi := multiEvent{perfEvent: perfEvent}
|
||||||
|
events := []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}},
|
||||||
|
}
|
||||||
|
multi2 := multiEvent{perfEvent: perfEvent2}
|
||||||
|
events2 := []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}},
|
||||||
|
}
|
||||||
|
for _, event := range events {
|
||||||
|
multi.activeEvents = append(multi.activeEvents, event.activeEvent)
|
||||||
|
mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once()
|
||||||
|
}
|
||||||
|
for _, event := range events2 {
|
||||||
|
multi2.activeEvents = append(multi2.activeEvents, event.activeEvent)
|
||||||
|
mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once()
|
||||||
|
}
|
||||||
|
newMetric := uncoreMetric{
|
||||||
|
values: ia.CounterValue{Raw: 6008, Enabled: 0, Running: 0},
|
||||||
|
socket: multi.socket,
|
||||||
|
unitType: perfEvent.PMUName,
|
||||||
|
name: perfEvent.Name,
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
newMetric2 := uncoreMetric{
|
||||||
|
values: ia.CounterValue{Raw: 125008, Enabled: 0, Running: 0},
|
||||||
|
socket: multi2.socket,
|
||||||
|
unitType: perfEvent2.PMUName,
|
||||||
|
name: perfEvent2.Name,
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
expected := []uncoreMetric{newMetric, newMetric2}
|
||||||
|
entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi, multi2}}
|
||||||
|
|
||||||
|
metrics, err := mEntitiesReader.readUncoreEvents(entityAgg)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expected, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
|
||||||
|
t.Run("reading error", func(t *testing.T) {
|
||||||
|
event := &ia.ActiveEvent{PerfEvent: perfEvent}
|
||||||
|
multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}}
|
||||||
|
|
||||||
|
mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once()
|
||||||
|
|
||||||
|
entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi}}
|
||||||
|
metrics, err = mEntitiesReader.readUncoreEvents(entityAgg)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("read distributed entities", func(t *testing.T) {
|
||||||
|
mReader := &mockValuesReader{}
|
||||||
|
mTimer := &moonClock{}
|
||||||
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
|
|
||||||
|
perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}}
|
||||||
|
perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}}
|
||||||
|
|
||||||
|
multi := multiEvent{perfEvent: perfEvent, socket: 2}
|
||||||
|
events := []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}},
|
||||||
|
}
|
||||||
|
multi2 := multiEvent{perfEvent: perfEvent2, socket: 1}
|
||||||
|
events2 := []eventWithValues{
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}},
|
||||||
|
{&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}},
|
||||||
|
}
|
||||||
|
var expected []uncoreMetric
|
||||||
|
for _, event := range events {
|
||||||
|
multi.activeEvents = append(multi.activeEvents, event.activeEvent)
|
||||||
|
mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once()
|
||||||
|
|
||||||
|
newMetric := uncoreMetric{
|
||||||
|
values: event.values,
|
||||||
|
socket: multi.socket,
|
||||||
|
unitType: perfEvent.PMUName,
|
||||||
|
name: perfEvent.Name,
|
||||||
|
unit: event.activeEvent.PMUName(),
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
expected = append(expected, newMetric)
|
||||||
|
}
|
||||||
|
for _, event := range events2 {
|
||||||
|
multi2.activeEvents = append(multi2.activeEvents, event.activeEvent)
|
||||||
|
mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once()
|
||||||
|
|
||||||
|
newMetric := uncoreMetric{
|
||||||
|
values: event.values,
|
||||||
|
socket: multi2.socket,
|
||||||
|
unitType: perfEvent2.PMUName,
|
||||||
|
name: perfEvent2.Name,
|
||||||
|
unit: event.activeEvent.PMUName(),
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
expected = append(expected, newMetric)
|
||||||
|
}
|
||||||
|
entity := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi, multi2}}
|
||||||
|
|
||||||
|
metrics, err := mEntitiesReader.readUncoreEvents(entity)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expected, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
|
||||||
|
t.Run("reading error", func(t *testing.T) {
|
||||||
|
event := &ia.ActiveEvent{PerfEvent: perfEvent}
|
||||||
|
multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}}
|
||||||
|
|
||||||
|
mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once()
|
||||||
|
|
||||||
|
entityAgg := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi}}
|
||||||
|
metrics, err = mEntitiesReader.readUncoreEvents(entityAgg)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Nil(t, metrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadEntities(t *testing.T) {
|
||||||
|
mReader := &mockValuesReader{}
|
||||||
|
mTimer := &moonClock{}
|
||||||
|
mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer}
|
||||||
|
|
||||||
|
t.Run("read entities", func(t *testing.T) {
|
||||||
|
values := ia.CounterValue{}
|
||||||
|
socket := 0
|
||||||
|
|
||||||
|
corePerfEvent := &ia.PerfEvent{Name: "core event 1", PMUName: "cpu"}
|
||||||
|
activeCoreEvent := []*ia.ActiveEvent{{PerfEvent: corePerfEvent}}
|
||||||
|
coreMetric1 := coreMetric{values: values, name: corePerfEvent.Name, time: mTimer.now()}
|
||||||
|
|
||||||
|
corePerfEvent2 := &ia.PerfEvent{Name: "core event 2", PMUName: "cpu"}
|
||||||
|
activeCoreEvent2 := []*ia.ActiveEvent{{PerfEvent: corePerfEvent2}}
|
||||||
|
coreMetric2 := coreMetric{values: values, name: corePerfEvent2.Name, time: mTimer.now()}
|
||||||
|
|
||||||
|
uncorePerfEvent := &ia.PerfEvent{Name: "uncore event 1", PMUName: "cbox"}
|
||||||
|
activeUncoreEvent := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent}}
|
||||||
|
uncoreMetric1 := uncoreMetric{
|
||||||
|
values: values,
|
||||||
|
name: uncorePerfEvent.Name,
|
||||||
|
unitType: uncorePerfEvent.PMUName,
|
||||||
|
socket: socket,
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
uncorePerfEvent2 := &ia.PerfEvent{Name: "uncore event 2", PMUName: "rig"}
|
||||||
|
activeUncoreEvent2 := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent2}}
|
||||||
|
uncoreMetric2 := uncoreMetric{
|
||||||
|
values: values,
|
||||||
|
name: uncorePerfEvent2.Name,
|
||||||
|
unitType: uncorePerfEvent2.PMUName,
|
||||||
|
socket: socket,
|
||||||
|
time: mTimer.now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
coreEntities := []*CoreEventEntity{{activeEvents: activeCoreEvent}, {activeEvents: activeCoreEvent2}}
|
||||||
|
|
||||||
|
uncoreEntities := []*UncoreEventEntity{
|
||||||
|
{activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent, perfEvent: uncorePerfEvent, socket: socket}}},
|
||||||
|
{activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent2, perfEvent: uncorePerfEvent2, socket: socket}}},
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedCoreMetrics := []coreMetric{coreMetric1, coreMetric2}
|
||||||
|
expectedUncoreMetrics := []uncoreMetric{uncoreMetric1, uncoreMetric2}
|
||||||
|
|
||||||
|
mReader.On("readValue", activeCoreEvent[0]).Return(values, nil).Once()
|
||||||
|
mReader.On("readValue", activeCoreEvent2[0]).Return(values, nil).Once()
|
||||||
|
mReader.On("readValue", activeUncoreEvent[0]).Return(values, nil).Once()
|
||||||
|
mReader.On("readValue", activeUncoreEvent2[0]).Return(values, nil).Once()
|
||||||
|
|
||||||
|
coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, uncoreEntities)
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expectedCoreMetrics, coreMetrics)
|
||||||
|
require.Equal(t, expectedUncoreMetrics, uncoreMetrics)
|
||||||
|
mReader.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("core entity reading failed", func(t *testing.T) {
|
||||||
|
coreEntities := []*CoreEventEntity{nil}
|
||||||
|
coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, nil)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entity is nil")
|
||||||
|
require.Nil(t, coreMetrics)
|
||||||
|
require.Nil(t, uncoreMetrics)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uncore entity reading failed", func(t *testing.T) {
|
||||||
|
uncoreEntities := []*UncoreEventEntity{nil}
|
||||||
|
coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(nil, uncoreEntities)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "entity is nil")
|
||||||
|
require.Nil(t, coreMetrics)
|
||||||
|
require.Nil(t, uncoreMetrics)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,150 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
)
|
||||||
|
|
||||||
|
type entitiesResolver interface {
|
||||||
|
resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type iaEntitiesResolver struct {
|
||||||
|
reader ia.Reader
|
||||||
|
transformer ia.Transformer
|
||||||
|
log telegraf.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error {
|
||||||
|
for _, entity := range coreEntities {
|
||||||
|
if entity == nil {
|
||||||
|
return fmt.Errorf("core entity is nil")
|
||||||
|
}
|
||||||
|
if entity.allEvents {
|
||||||
|
newEvents, _, err := e.resolveAllEvents()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to resolve all events: %v", err)
|
||||||
|
}
|
||||||
|
entity.parsedEvents = newEvents
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, event := range entity.parsedEvents {
|
||||||
|
if event == nil {
|
||||||
|
return fmt.Errorf("parsed core event is nil")
|
||||||
|
}
|
||||||
|
customEvent, err := e.resolveEvent(event.name, event.qualifiers)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to resolve core event `%s`: %v", event.name, err)
|
||||||
|
}
|
||||||
|
if customEvent.Event.Uncore {
|
||||||
|
return fmt.Errorf("uncore event `%s` found in core entity", event.name)
|
||||||
|
}
|
||||||
|
event.custom = customEvent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, entity := range uncoreEntities {
|
||||||
|
if entity == nil {
|
||||||
|
return fmt.Errorf("uncore entity is nil")
|
||||||
|
}
|
||||||
|
if entity.allEvents {
|
||||||
|
_, newEvents, err := e.resolveAllEvents()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to resolve all events: %v", err)
|
||||||
|
}
|
||||||
|
entity.parsedEvents = newEvents
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, event := range entity.parsedEvents {
|
||||||
|
if event == nil {
|
||||||
|
return fmt.Errorf("parsed uncore event is nil")
|
||||||
|
}
|
||||||
|
customEvent, err := e.resolveEvent(event.name, event.qualifiers)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to resolve uncore event `%s`: %v", event.name, err)
|
||||||
|
}
|
||||||
|
if !customEvent.Event.Uncore {
|
||||||
|
return fmt.Errorf("core event `%s` found in uncore entity", event.name)
|
||||||
|
}
|
||||||
|
event.custom = customEvent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, uncoreEvents []*eventWithQuals, err error) {
|
||||||
|
if e.transformer == nil {
|
||||||
|
return nil, nil, errors.New("transformer is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher())
|
||||||
|
if err != nil {
|
||||||
|
re, ok := err.(*ia.TransformationError)
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if e.log != nil && re != nil {
|
||||||
|
var eventErrs []string
|
||||||
|
for _, eventErr := range re.Errors() {
|
||||||
|
if eventErr == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
eventErrs = append(eventErrs, eventErr.Error())
|
||||||
|
}
|
||||||
|
errorsStr := strings.Join(eventErrs, ",\n")
|
||||||
|
e.log.Warnf("Cannot resolve all of the events from provided files:\n%s.\nSome events may be omitted.", errorsStr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, perfEvent := range perfEvents {
|
||||||
|
newEvent := &eventWithQuals{
|
||||||
|
name: perfEvent.Name,
|
||||||
|
custom: ia.CustomizableEvent{Event: perfEvent},
|
||||||
|
}
|
||||||
|
// build options for event
|
||||||
|
newEvent.custom.Options, err = ia.NewOptions().Build()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err)
|
||||||
|
}
|
||||||
|
if perfEvent.Uncore {
|
||||||
|
uncoreEvents = append(uncoreEvents, newEvent)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
coreEvents = append(coreEvents, newEvent)
|
||||||
|
}
|
||||||
|
return coreEvents, uncoreEvents, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.CustomizableEvent, error) {
|
||||||
|
var custom ia.CustomizableEvent
|
||||||
|
if e.transformer == nil {
|
||||||
|
return custom, errors.New("events transformer is nil")
|
||||||
|
}
|
||||||
|
if name == "" {
|
||||||
|
return custom, errors.New("event name is empty")
|
||||||
|
}
|
||||||
|
matcher := ia.NewNameMatcher(name)
|
||||||
|
perfEvents, err := e.transformer.Transform(e.reader, matcher)
|
||||||
|
if err != nil {
|
||||||
|
return custom, fmt.Errorf("failed to transform perf events: %v", err)
|
||||||
|
}
|
||||||
|
if len(perfEvents) < 1 {
|
||||||
|
return custom, fmt.Errorf("failed to resolve unknown event `%s`", name)
|
||||||
|
}
|
||||||
|
// build options for event
|
||||||
|
options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build()
|
||||||
|
if err != nil {
|
||||||
|
return custom, fmt.Errorf("failed to build options for event `%s`: %v", name, err)
|
||||||
|
}
|
||||||
|
custom = ia.CustomizableEvent{
|
||||||
|
Event: perfEvents[0],
|
||||||
|
Options: options,
|
||||||
|
}
|
||||||
|
return custom, nil
|
||||||
|
}
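Qualifiers attached to an event in the configuration travel through SetAttrModifiers into the perf attributes of the resolved event. A minimal sketch of that last step, assuming a perfEvent already obtained from Transform as in resolveEvent above (the helper name customize is illustrative, and the qualifier string is the one used in the tests):

// Sketch only: perfEvent comes from transformer.Transform; a malformed
// qualifier surfaces as an error from Build, exactly as in resolveEvent.
func customize(perfEvent *ia.PerfEvent, qualifiers []string) (ia.CustomizableEvent, error) {
	options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build()
	if err != nil {
		return ia.CustomizableEvent{}, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err)
	}
	return ia.CustomizableEvent{Event: perfEvent, Options: options}, nil
}

// e.g. customize(perfEvent, []string{"config1=0x23h"})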
|
||||||
|
|
@@ -0,0 +1,376 @@
|
||||||
|
//go:build linux && amd64
|
||||||
|
// +build linux,amd64
|
||||||
|
|
||||||
|
package intel_pmu
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
ia "github.com/intel/iaevents"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestResolveEntities(t *testing.T) {
|
||||||
|
errMock := errors.New("mock error")
|
||||||
|
mLog := testutil.Logger{}
|
||||||
|
mTransformer := &MockTransformer{}
|
||||||
|
mResolver := &iaEntitiesResolver{transformer: mTransformer, log: mLog}
|
||||||
|
|
||||||
|
type test struct {
|
||||||
|
perfEvent *ia.PerfEvent
|
||||||
|
options ia.Options
|
||||||
|
event *eventWithQuals
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("nil entities", func(t *testing.T) {
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{nil}, nil)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "core entity is nil")
|
||||||
|
|
||||||
|
err = mResolver.resolveEntities(nil, []*UncoreEventEntity{nil})
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "uncore entity is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("nil parsed events", func(t *testing.T) {
|
||||||
|
mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}}
|
||||||
|
mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}}
|
||||||
|
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "parsed core event is nil")
|
||||||
|
|
||||||
|
err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "parsed uncore event is nil")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("fail to resolve core events", func(t *testing.T) {
|
||||||
|
name := "mock event 1"
|
||||||
|
mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false}
|
||||||
|
matcher := ia.NewNameMatcher(name)
|
||||||
|
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock)
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event `%s`", name))
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("fail to resolve uncore events", func(t *testing.T) {
|
||||||
|
name := "mock event 1"
|
||||||
|
mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false}
|
||||||
|
matcher := ia.NewNameMatcher(name)
|
||||||
|
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock)
|
||||||
|
err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event `%s`", name))
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("resolve all core and uncore events", func(t *testing.T) {
|
||||||
|
mCoreEntity := &CoreEventEntity{allEvents: true}
|
||||||
|
mUncoreEntity := &UncoreEventEntity{allEvents: true}
|
||||||
|
corePerfEvents := []*ia.PerfEvent{
|
||||||
|
{Name: "core event1"},
|
||||||
|
{Name: "core event2"},
|
||||||
|
{Name: "core event3"},
|
||||||
|
}
|
||||||
|
uncorePerfEvents := []*ia.PerfEvent{
|
||||||
|
{Name: "uncore event1", Uncore: true},
|
||||||
|
{Name: "uncore event2", Uncore: true},
|
||||||
|
{Name: "uncore event3", Uncore: true},
|
||||||
|
}
|
||||||
|
matcher := ia.NewNameMatcher()
|
||||||
|
|
||||||
|
t.Run("fail to resolve all core events", func(t *testing.T) {
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock)
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "failed to resolve all events")
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("fail to resolve all uncore events", func(t *testing.T) {
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock)
|
||||||
|
err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "failed to resolve all events")
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("fail to resolve all events with transformationError", func(t *testing.T) {
|
||||||
|
transformErr := &ia.TransformationError{}
|
||||||
|
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, transformErr).Once()
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, transformErr).Once()
|
||||||
|
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents))
|
||||||
|
require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents))
|
||||||
|
for _, coreEvent := range mCoreEntity.parsedEvents {
|
||||||
|
require.Contains(t, corePerfEvents, coreEvent.custom.Event)
|
||||||
|
}
|
||||||
|
for _, uncoreEvent := range mUncoreEntity.parsedEvents {
|
||||||
|
require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event)
|
||||||
|
}
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, nil).Once()
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, nil).Once()
|
||||||
|
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents))
|
||||||
|
require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents))
|
||||||
|
for _, coreEvent := range mCoreEntity.parsedEvents {
|
||||||
|
require.Contains(t, corePerfEvents, coreEvent.custom.Event)
|
||||||
|
}
|
||||||
|
for _, uncoreEvent := range mUncoreEntity.parsedEvents {
|
||||||
|
require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event)
|
||||||
|
}
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uncore event found in core entity", func(t *testing.T) {
|
||||||
|
mQuals := []string{"config1=0x23h"}
|
||||||
|
mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build()
|
||||||
|
eventName := "uncore event 1"
|
||||||
|
|
||||||
|
testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
|
||||||
|
options: mOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true}}
|
||||||
|
|
||||||
|
matcher := ia.NewNameMatcher(eventName)
|
||||||
|
mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()
|
||||||
|
|
||||||
|
mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("uncore event `%s` found in core entity", eventName))
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("core event found in uncore entity", func(t *testing.T) {
|
||||||
|
mQuals := []string{"config1=0x23h"}
|
||||||
|
mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build()
|
||||||
|
eventName := "core event 1"
|
||||||
|
|
||||||
|
testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
|
||||||
|
options: mOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false}}
|
||||||
|
|
||||||
|
matcher := ia.NewNameMatcher(eventName)
|
||||||
|
mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()
|
||||||
|
|
||||||
|
mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
|
||||||
|
err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("core event `%s` found in uncore entity", eventName))
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
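	// Happy path: qualified events resolve with attr-modifier options, unqualified events with empty options.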
t.Run("resolve core and uncore events", func(t *testing.T) {
|
||||||
|
var mCoreEvents []*eventWithQuals
|
||||||
|
var nUncoreEvents []*eventWithQuals
|
||||||
|
|
||||||
|
mQuals := []string{"config1=0x23h"}
|
||||||
|
mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build()
|
||||||
|
emptyOptions, _ := ia.NewOptions().Build()
|
||||||
|
|
||||||
|
coreTestCases := []test{
|
||||||
|
{event: &eventWithQuals{name: "core1", qualifiers: mQuals},
|
||||||
|
options: mOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: "core1"}},
|
||||||
|
{event: &eventWithQuals{name: "core2", qualifiers: nil},
|
||||||
|
options: emptyOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: "core2"}},
|
||||||
|
{event: &eventWithQuals{name: "core3", qualifiers: nil},
|
||||||
|
options: emptyOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: "core3"}},
|
||||||
|
}
|
||||||
|
uncoreTestCases := []test{
|
||||||
|
{event: &eventWithQuals{name: "uncore1", qualifiers: mQuals},
|
||||||
|
options: mOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: "uncore1", Uncore: true}},
|
||||||
|
{event: &eventWithQuals{name: "uncore2", qualifiers: nil},
|
||||||
|
options: emptyOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: "uncore2", Uncore: true}},
|
||||||
|
{event: &eventWithQuals{name: "uncore3", qualifiers: nil},
|
||||||
|
options: emptyOptions,
|
||||||
|
perfEvent: &ia.PerfEvent{Name: "uncore3", Uncore: true}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range coreTestCases {
|
||||||
|
matcher := ia.NewNameMatcher(test.event.name)
|
||||||
|
mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once()
|
||||||
|
mCoreEvents = append(mCoreEvents, test.event)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range uncoreTestCases {
|
||||||
|
matcher := ia.NewNameMatcher(test.event.name)
|
||||||
|
mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once()
|
||||||
|
nUncoreEvents = append(nUncoreEvents, test.event)
|
||||||
|
}
|
||||||
|
|
||||||
|
mCoreEntity := &CoreEventEntity{parsedEvents: mCoreEvents, allEvents: false}
|
||||||
|
mUncoreEntity := &UncoreEventEntity{parsedEvents: nUncoreEvents, allEvents: false}
|
||||||
|
err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity})
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
for _, test := range append(coreTestCases, uncoreTestCases...) {
|
||||||
|
require.Equal(t, test.perfEvent, test.event.custom.Event)
|
||||||
|
require.Equal(t, test.options, test.event.custom.Options)
|
||||||
|
}
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
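// TestResolveAllEvents exercises resolveAllEvents, which matches every available event and splits the results into core and uncore slices.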
func TestResolveAllEvents(t *testing.T) {
	mTransformer := &MockTransformer{}

	mResolver := &iaEntitiesResolver{transformer: mTransformer}

	t.Run("transformer is nil", func(t *testing.T) {
		mResolver := &iaEntitiesResolver{transformer: nil}
		_, _, err := mResolver.resolveAllEvents()
		require.Error(t, err)
	})

	t.Run("transformer returns error", func(t *testing.T) {
		matcher := ia.NewNameMatcher()
		mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error"))

		_, _, err := mResolver.resolveAllEvents()
		require.Error(t, err)
		mTransformer.AssertExpectations(t)
	})

	t.Run("no events", func(t *testing.T) {
		matcher := ia.NewNameMatcher()
		mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil)

		_, _, err := mResolver.resolveAllEvents()
		require.NoError(t, err)
		mTransformer.AssertExpectations(t)
	})

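	// Resolved events are split by the Uncore flag and wrapped with empty (default) options.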
t.Run("successfully resolved events", func(t *testing.T) {
|
||||||
|
perfEvent1 := &ia.PerfEvent{Name: "mock1"}
|
||||||
|
perfEvent2 := &ia.PerfEvent{Name: "mock2"}
|
||||||
|
uncorePerfEvent1 := &ia.PerfEvent{Name: "mock3", Uncore: true}
|
||||||
|
uncorePerfEvent2 := &ia.PerfEvent{Name: "mock4", Uncore: true}
|
||||||
|
|
||||||
|
options, _ := ia.NewOptions().Build()
|
||||||
|
perfEvents := []*ia.PerfEvent{perfEvent1, perfEvent2, uncorePerfEvent1, uncorePerfEvent2}
|
||||||
|
|
||||||
|
expectedCore := []*eventWithQuals{
|
||||||
|
{name: perfEvent1.Name, custom: ia.CustomizableEvent{Event: perfEvent1, Options: options}},
|
||||||
|
{name: perfEvent2.Name, custom: ia.CustomizableEvent{Event: perfEvent2, Options: options}},
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedUncore := []*eventWithQuals{
|
||||||
|
{name: uncorePerfEvent1.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent1, Options: options}},
|
||||||
|
{name: uncorePerfEvent2.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent2, Options: options}},
|
||||||
|
}
|
||||||
|
|
||||||
|
matcher := ia.NewNameMatcher()
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(perfEvents, nil)
|
||||||
|
|
||||||
|
coreEvents, uncoreEvents, err := mResolver.resolveAllEvents()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, expectedCore, coreEvents)
|
||||||
|
require.Equal(t, expectedUncore, uncoreEvents)
|
||||||
|
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
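// TestResolveEvent exercises resolveEvent, which resolves a single named event and builds its perf options from the supplied qualifiers.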
func TestResolveEvent(t *testing.T) {
	mTransformer := &MockTransformer{}
	mEvent := "mock event"

	mResolver := &iaEntitiesResolver{transformer: mTransformer}

	t.Run("transformer is nil", func(t *testing.T) {
		mResolver := &iaEntitiesResolver{transformer: nil}
		_, err := mResolver.resolveEvent("event", nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "events transformer is nil")
	})

	t.Run("event is empty", func(t *testing.T) {
		_, err := mResolver.resolveEvent("", nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "event name is empty")
	})

	t.Run("transformer returns error", func(t *testing.T) {
		matcher := ia.NewNameMatcher(mEvent)
		mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error"))

		_, err := mResolver.resolveEvent(mEvent, nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "failed to transform perf events")
		mTransformer.AssertExpectations(t)
	})

	t.Run("no events transformed", func(t *testing.T) {
		matcher := ia.NewNameMatcher(mEvent)
		mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil)

		_, err := mResolver.resolveEvent(mEvent, nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "failed to resolve unknown event")
		mTransformer.AssertExpectations(t)
	})

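	// Qualifiers feed the options builder: malformed strings make resolveEvent fail, valid ones become attr modifiers on the result.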
t.Run("not valid qualifiers", func(t *testing.T) {
|
||||||
|
event := "mock event 1"
|
||||||
|
qualifiers := []string{"wrong modifiers"}
|
||||||
|
|
||||||
|
matcher := ia.NewNameMatcher(event)
|
||||||
|
mPerfEvent := &ia.PerfEvent{Name: event}
|
||||||
|
mPerfEvents := []*ia.PerfEvent{mPerfEvent}
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil)
|
||||||
|
|
||||||
|
_, err := mResolver.resolveEvent(event, qualifiers)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event `%s`", event))
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("successfully transformed", func(t *testing.T) {
|
||||||
|
event := "mock event 1"
|
||||||
|
qualifiers := []string{"config1=0x012h", "config2=0x034k"}
|
||||||
|
|
||||||
|
matcher := ia.NewNameMatcher(event)
|
||||||
|
|
||||||
|
mPerfEvent := &ia.PerfEvent{Name: event}
|
||||||
|
mPerfEvents := []*ia.PerfEvent{mPerfEvent}
|
||||||
|
|
||||||
|
expectedOptions, _ := ia.NewOptions().SetAttrModifiers(qualifiers).Build()
|
||||||
|
|
||||||
|
mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil)
|
||||||
|
|
||||||
|
customEvent, err := mResolver.resolveEvent(event, qualifiers)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, mPerfEvent, customEvent.Event)
|
||||||
|
require.Equal(t, expectedOptions, customEvent.Options)
|
||||||
|
mTransformer.AssertExpectations(t)
|
||||||
|
})
|
||||||
|
}