fix(input.modbus): Add workaround to read field in separate requests (#12235)
commit 6cfae2a1fd
parent 0f8dff9b8d
@@ -218,14 +218,25 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Enable workarounds required by some devices to work correctly
 # [inputs.modbus.workarounds]
-## Pause after connect delays the first request by the specified time. This might be necessary for (slow) devices.
+## Pause after connect delays the first request by the specified time.
+## This might be necessary for (slow) devices.
 # pause_after_connect = "0ms"
-## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+
+## Pause between read requests sent to the device.
+## This might be necessary for (slow) serial devices.
 # pause_between_requests = "0ms"
-## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
-## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
-## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
+
+## Close the connection after every gather cycle.
+## Usually the plugin closes the connection after a certain idle-timeout,
+## however, if you query a device with limited simultaneous connectivity
+## (e.g. serial devices) from multiple instances you might want to only
+## stay connected during gather and disconnect afterwards.
 # close_connection_after_gather = false
+
+## Force the plugin to read each field in a separate request.
+## This might be necessary for devices not conforming to the spec,
+## see https://github.com/influxdata/telegraf/issues/12071.
+# one_request_per_field = false
 ```

 ## Notes
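
As a usage illustration (not part of the committed diff), a register-style configuration that opts into the new workaround might look like the sketch below. The device settings and the single holding register are placeholder values; only the `[inputs.modbus.workarounds]` table and the `one_request_per_field` key are introduced by this change.

```toml
[[inputs.modbus]]
  ## Placeholder device settings for the sketch; adjust for your device.
  name = "device"
  slave_id = 1
  timeout = "1s"
  controller = "tcp://localhost:502"

  ## One illustrative holding register (register-style configuration).
  holding_registers = [
    { name = "holding-1", byte_order = "AB", data_type = "INT16", scale = 1.0, address = [1] },
  ]

  ## Workaround added by this commit: issue one read request per field.
  [inputs.modbus.workarounds]
    one_request_per_field = true
```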
@@ -23,6 +23,7 @@ type ConfigurationOriginal struct {
 	Coils            []fieldDefinition `toml:"coils"`
 	HoldingRegisters []fieldDefinition `toml:"holding_registers"`
 	InputRegisters   []fieldDefinition `toml:"input_registers"`
+	workarounds      ModbusWorkarounds
 }

 func (c *ConfigurationOriginal) SampleConfigPart() string {
@@ -46,22 +47,35 @@ func (c *ConfigurationOriginal) Check() error {
 }

 func (c *ConfigurationOriginal) Process() (map[byte]requestSet, error) {
-	coil, err := c.initRequests(c.Coils, maxQuantityCoils)
+	maxQuantity := uint16(1)
+	if !c.workarounds.OnRequestPerField {
+		maxQuantity = maxQuantityCoils
+	}
+	coil, err := c.initRequests(c.Coils, maxQuantity)
 	if err != nil {
 		return nil, err
 	}

-	discrete, err := c.initRequests(c.DiscreteInputs, maxQuantityDiscreteInput)
+	if !c.workarounds.OnRequestPerField {
+		maxQuantity = maxQuantityDiscreteInput
+	}
+	discrete, err := c.initRequests(c.DiscreteInputs, maxQuantity)
 	if err != nil {
 		return nil, err
 	}

-	holding, err := c.initRequests(c.HoldingRegisters, maxQuantityHoldingRegisters)
+	if !c.workarounds.OnRequestPerField {
+		maxQuantity = maxQuantityHoldingRegisters
+	}
+	holding, err := c.initRequests(c.HoldingRegisters, maxQuantity)
 	if err != nil {
 		return nil, err
 	}

-	input, err := c.initRequests(c.InputRegisters, maxQuantityInputRegisters)
+	if !c.workarounds.OnRequestPerField {
+		maxQuantity = maxQuantityInputRegisters
+	}
+	input, err := c.initRequests(c.InputRegisters, maxQuantity)
 	if err != nil {
 		return nil, err
 	}
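
To make the effect of the `maxQuantity` cap concrete, here is a small self-contained sketch of the grouping idea. The `field`, `request`, and `groupFields` names are simplified stand-ins invented for illustration and are not the plugin's actual `groupFieldsToRequests` implementation; the point is only that capping a request at one register turns every field into its own request, while a larger cap lets consecutive registers coalesce.

```go
package main

import "fmt"

// field is a simplified stand-in for a configured modbus field (illustration only).
type field struct {
	name    string
	address uint16
	length  uint16 // number of registers the field occupies
}

// request covers a contiguous register range read in one Modbus request.
type request struct {
	first    uint16
	quantity uint16
	fields   []string
}

// groupFields coalesces consecutive fields into read requests, never letting a
// single request cover more than maxQuantity registers. With maxQuantity == 1,
// every field ends up in its own request, which is the behaviour the
// one_request_per_field workaround selects.
func groupFields(fields []field, maxQuantity uint16) []request {
	var requests []request
	for _, f := range fields {
		if n := len(requests); n > 0 {
			cur := &requests[n-1]
			contiguous := f.address == cur.first+cur.quantity
			if contiguous && cur.quantity+f.length <= maxQuantity {
				cur.quantity += f.length
				cur.fields = append(cur.fields, f.name)
				continue
			}
		}
		requests = append(requests, request{first: f.address, quantity: f.length, fields: []string{f.name}})
	}
	return requests
}

func main() {
	fields := []field{
		{name: "holding-1", address: 1, length: 1},
		{name: "holding-2", address: 2, length: 1},
		{name: "holding-3", address: 3, length: 1},
	}
	fmt.Println(len(groupFields(fields, 125))) // 1: all three registers batched into one request
	fmt.Println(len(groupFields(fields, 1)))   // 3: one request per field
}
```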
@@ -33,7 +33,8 @@ type requestDefinition struct {
 }

 type ConfigurationPerRequest struct {
-	Requests []requestDefinition `toml:"request"`
+	Requests    []requestDefinition `toml:"request"`
+	workarounds ModbusWorkarounds
 }

 func (c *ConfigurationPerRequest) SampleConfigPart() string {
@@ -162,16 +163,32 @@ func (c *ConfigurationPerRequest) Process() (map[byte]requestSet, error) {

 		switch def.RegisterType {
 		case "coil":
-			requests := groupFieldsToRequests(fields, def.Tags, maxQuantityCoils, def.Optimization)
+			maxQuantity := maxQuantityCoils
+			if c.workarounds.OnRequestPerField {
+				maxQuantity = 1
+			}
+			requests := groupFieldsToRequests(fields, def.Tags, maxQuantity, def.Optimization)
 			set.coil = append(set.coil, requests...)
 		case "discrete":
-			requests := groupFieldsToRequests(fields, def.Tags, maxQuantityDiscreteInput, def.Optimization)
+			maxQuantity := maxQuantityDiscreteInput
+			if c.workarounds.OnRequestPerField {
+				maxQuantity = 1
+			}
+			requests := groupFieldsToRequests(fields, def.Tags, maxQuantity, def.Optimization)
 			set.discrete = append(set.discrete, requests...)
 		case "holding":
-			requests := groupFieldsToRequests(fields, def.Tags, maxQuantityHoldingRegisters, def.Optimization)
+			maxQuantity := maxQuantityHoldingRegisters
+			if c.workarounds.OnRequestPerField {
+				maxQuantity = 1
+			}
+			requests := groupFieldsToRequests(fields, def.Tags, maxQuantity, def.Optimization)
 			set.holding = append(set.holding, requests...)
 		case "input":
-			requests := groupFieldsToRequests(fields, def.Tags, maxQuantityInputRegisters, def.Optimization)
+			maxQuantity := maxQuantityInputRegisters
+			if c.workarounds.OnRequestPerField {
+				maxQuantity = 1
+			}
+			requests := groupFieldsToRequests(fields, def.Tags, maxQuantity, def.Optimization)
 			set.input = append(set.input, requests...)
 		default:
 			return nil, fmt.Errorf("unknown register type %q", def.RegisterType)
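
The same workaround applies to the request-style configuration handled above. A hedged example is sketched below; the `register`, `fields`, `address`, `name`, and `type` keys follow the plugin's documented request-style schema as I understand it, so treat the exact key names as assumptions and check the plugin README. With `one_request_per_field = true`, the two holding fields should be queried with two separate read requests instead of one batched request.

```toml
[[inputs.modbus]]
  name = "device"
  controller = "tcp://localhost:1502"
  configuration_type = "request"

  ## Workaround added by this commit.
  [inputs.modbus.workarounds]
    one_request_per_field = true

  [[inputs.modbus.request]]
    slave_id = 1
    byte_order = "ABCD"
    register = "holding"
    fields = [
      { address = 1, name = "holding-1", type = "INT16" },
      { address = 2, name = "holding-2", type = "INT16" },
    ]
```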
@@ -27,6 +27,7 @@ type ModbusWorkarounds struct {
 	AfterConnectPause config.Duration `toml:"pause_after_connect"`
 	PollPause         config.Duration `toml:"pause_between_requests"`
 	CloseAfterGather  bool            `toml:"close_connection_after_gather"`
+	OnRequestPerField bool            `toml:"one_request_per_field"`
 }

 // Modbus holds all data relevant to the plugin
@@ -113,8 +114,10 @@ func (m *Modbus) Init() error {
 	var cfg Configuration
 	switch m.ConfigurationType {
 	case "", "register":
+		m.ConfigurationOriginal.workarounds = m.Workarounds
 		cfg = &m.ConfigurationOriginal
 	case "request":
+		m.ConfigurationPerRequest.workarounds = m.Workarounds
 		cfg = &m.ConfigurationPerRequest
 	default:
 		return fmt.Errorf("unknown configuration type %q", m.ConfigurationType)
@@ -2865,3 +2865,99 @@ func TestRequestOptimizationAggressive(t *testing.T) {
 		})
 	}
 }
+
+func TestRequestsWorkaroundsOneRequestPerField(t *testing.T) {
+	plugin := Modbus{
+		Name:              "Test",
+		Controller:        "tcp://localhost:1502",
+		ConfigurationType: "request",
+		Log:               testutil.Logger{},
+		Workarounds:       ModbusWorkarounds{OnRequestPerField: true},
+	}
+	plugin.Requests = []requestDefinition{
+		{
+			SlaveID:      1,
+			ByteOrder:    "ABCD",
+			RegisterType: "holding",
+			Fields: []requestFieldDefinition{
+				{
+					Name:      "holding-1",
+					Address:   uint16(1),
+					InputType: "INT16",
+				},
+				{
+					Name:      "holding-2",
+					Address:   uint16(2),
+					InputType: "INT16",
+				},
+				{
+					Name:      "holding-3",
+					Address:   uint16(3),
+					InputType: "INT16",
+				},
+				{
+					Name:      "holding-4",
+					Address:   uint16(4),
+					InputType: "INT16",
+				},
+				{
+					Name:      "holding-5",
+					Address:   uint16(5),
+					InputType: "INT16",
+				},
+			},
+		},
+	}
+	require.NoError(t, plugin.Init())
+	require.Len(t, plugin.requests[1].holding, len(plugin.Requests[0].Fields))
+}
+
+func TestRegisterWorkaroundsOneRequestPerField(t *testing.T) {
+	plugin := Modbus{
+		Name:              "Test",
+		Controller:        "tcp://localhost:1502",
+		ConfigurationType: "register",
+		Log:               testutil.Logger{},
+		Workarounds:       ModbusWorkarounds{OnRequestPerField: true},
+	}
+	plugin.SlaveID = 1
+	plugin.HoldingRegisters = []fieldDefinition{
+		{
+			ByteOrder: "AB",
+			DataType:  "INT16",
+			Name:      "holding-1",
+			Address:   []uint16{1},
+			Scale:     1.0,
+		},
+		{
+			ByteOrder: "AB",
+			DataType:  "INT16",
+			Name:      "holding-2",
+			Address:   []uint16{2},
+			Scale:     1.0,
+		},
+		{
+			ByteOrder: "AB",
+			DataType:  "INT16",
+			Name:      "holding-3",
+			Address:   []uint16{3},
+			Scale:     1.0,
+		},
+		{
+			ByteOrder: "AB",
+			DataType:  "INT16",
+			Name:      "holding-4",
+			Address:   []uint16{4},
+			Scale:     1.0,
+		},
+		{
+			ByteOrder: "AB",
+			DataType:  "INT16",
+			Name:      "holding-5",
+			Address:   []uint16{5},
+			Scale:     1.0,
+		},
+	}
+	require.NoError(t, plugin.Init())
+	require.Len(t, plugin.requests[1].holding, len(plugin.HoldingRegisters))
+}
@@ -1,10 +1,21 @@
 ## Enable workarounds required by some devices to work correctly
 # [inputs.modbus.workarounds]
-## Pause after connect delays the first request by the specified time. This might be necessary for (slow) devices.
+## Pause after connect delays the first request by the specified time.
+## This might be necessary for (slow) devices.
 # pause_after_connect = "0ms"
-## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+
+## Pause between read requests sent to the device.
+## This might be necessary for (slow) serial devices.
 # pause_between_requests = "0ms"
-## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
-## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
-## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
+
+## Close the connection after every gather cycle.
+## Usually the plugin closes the connection after a certain idle-timeout,
+## however, if you query a device with limited simultaneous connectivity
+## (e.g. serial devices) from multiple instances you might want to only
+## stay connected during gather and disconnect afterwards.
 # close_connection_after_gather = false
+
+## Force the plugin to read each field in a separate request.
+## This might be necessary for devices not conforming to the spec,
+## see https://github.com/influxdata/telegraf/issues/12071.
+# one_request_per_field = false