initial checkin

This commit is contained in:
Pim van Pelt
2021-09-05 15:13:12 +00:00
commit 51eee915bf
16 changed files with 1998 additions and 0 deletions

3
.gitignore vendored Normal file

@ -0,0 +1,3 @@
build/
dist
__pycache__

7
CONTRIBUTORS Normal file

@ -0,0 +1,7 @@
For pyagentx:
Rayed Alrashed <rayed@rayed.com>
Daniel Heule
Mohammed Alshohayeb <moshohayeb@gmail.com>
For the plugin itself:
Pim van Pelt <pim@ipng.nl>

23
LICENSE Normal file

@ -0,0 +1,23 @@
Copyright (c) 2013, Rayed A Alrashed
Copyright (c) 2021, Pim van Pelt <pim@ipng.nl>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

50
README.md Normal file

@ -0,0 +1,50 @@
# VPP's Interface AgentX
This is an SNMP agent that implements the [AgentX](https://datatracker.ietf.org/doc/html/rfc2257)
protocol. It connects to VPP's `statseg` (statistics memory segment) by mmap()ing
it, so the user running the agent must have read access to `/run/vpp/stats.sock`.
It then connects to snmpd's AgentX socket, which can be either a TCP socket
(by default localhost:705) or a unix domain socket (by default /var/agentx/master),
the latter being readable only by root. It is preferable to run the agent as an
unprivileged user.
The agent incorporates [pyagentx](https://github.com/hosthvo/pyagentx) with a few
changes, and is released under the BSD 2-clause license.
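For reference, the statseg read that the agent performs boils down to the following
sketch, using the bundled `vppstats.py` (this assumes VPP is running and
`/run/vpp/stats.sock` is readable by the current user):
```
from vppstats import VPPStats

stat = VPPStats(socketname='/run/vpp/stats.sock', timeout=2)
stat.connect()
print(stat['/if/names'])                    # interface names
print(stat['/if/rx'][:, 1].sum_packets())   # RX packets for interface 1, summed over all threads
print(stat['/if/tx'][:, 1].sum_octets())    # TX octets for interface 1, summed over all threads
stat.disconnect()
```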
## Running
First, configure the snmpd to accept agentx connections by adding the following
to `snmpd.conf`:
```
master agentx
agentXSocket tcp:localhost:705,unix:/var/agentx-dataplane/master
```
and restart snmpd to pick up the changes. Then run `./vpp-snmp-agent.py`: it will
connect to snmpd on localhost:705 and expose the IF-MIB by periodically polling
VPP. Observe the console output.
## Building
Install `pyinstaller` to build a binary distribution:
```
sudo pip install pyinstaller
pyinstaller vpp-snmp-agent.py --onefile
dist/vpp-snmp-agent
sudo mkdir -p /usr/local/sbin/
sudo cp dist/vpp-snmp-agent /usr/local/sbin/
```
## Running in production
This is meant to be run on Ubuntu. Copy `snmpd-dataplane.service` and
`vpp-snmp-agent.service` into systemd's unit directory, then enable and start them:
```
sudo cp snmpd-dataplane.service /usr/lib/systemd/system/
sudo cp vpp-snmp-agent.service /usr/lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable snmpd-dataplane
sudo systemctl start snmpd-dataplane
sudo systemctl enable vpp-snmp-agent
sudo systemctl start vpp-snmp-agent
```

1
pyagentx/.gitignore vendored Normal file

@ -0,0 +1 @@
__pycache__

159
pyagentx/__init__.py Normal file

@ -0,0 +1,159 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
)
import logging
from pyagentx.updater import Updater
from pyagentx.agent import Agent
from pyagentx.sethandler import SetHandler, SetHandlerError
def setup_logging(debug=False):
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger = logging.getLogger('pyagentx')
logger.setLevel(level)
# formatter = logging.Formatter('%(asctime)s - %(name)20s - %(levelname)s - %(message)s')
formatter = logging.Formatter(
'[%(levelname)-8s] %(name)17s - %(funcName)-15s: %(message)s')
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
SOCKET_PATH = "/var/agentx/master"
AGENTX_EMPTY_PDU = 0
AGENTX_OPEN_PDU = 1
AGENTX_CLOSE_PDU = 2
AGENTX_REGISTER_PDU = 3
AGENTX_UNREGISTER_PDU = 4
AGENTX_GET_PDU = 5
AGENTX_GETNEXT_PDU = 6
AGENTX_GETBULK_PDU = 7
AGENTX_TESTSET_PDU = 8
AGENTX_COMMITSET_PDU = 9
AGENTX_UNDOSET_PDU = 10
AGENTX_CLEANUPSET_PDU = 11
AGENTX_NOTIFY_PDU = 12
AGENTX_PING_PDU = 13
AGENTX_INDEXALLOCATE_PDU = 14
AGENTX_INDEXDEALLOCATE_PDU = 15
AGENTX_ADDAGENTCAPS_PDU = 16
AGENTX_REMOVEAGENTCAPS_PDU = 17
AGENTX_RESPONSE_PDU = 18
PDU_TYPE_NAME = {}
PDU_TYPE_NAME[0] = "EMPTY_PDU"
PDU_TYPE_NAME[1] = "OPEN_PDU"
PDU_TYPE_NAME[2] = "CLOSE_PDU"
PDU_TYPE_NAME[3] = "REGISTER_PDU"
PDU_TYPE_NAME[4] = "UNREGISTER_PDU"
PDU_TYPE_NAME[5] = "GET_PDU"
PDU_TYPE_NAME[6] = "GETNEXT_PDU"
PDU_TYPE_NAME[7] = "GETBULK_PDU"
PDU_TYPE_NAME[8] = "TESTSET_PDU"
PDU_TYPE_NAME[9] = "COMMITSET_PDU"
PDU_TYPE_NAME[10] = "UNDOSET_PDU"
PDU_TYPE_NAME[11] = "CLEANUPSET_PDU"
PDU_TYPE_NAME[12] = "NOTIFY_PDU"
PDU_TYPE_NAME[13] = "PING_PDU"
PDU_TYPE_NAME[14] = "INDEXALLOCATE_PDU"
PDU_TYPE_NAME[15] = "INDEXDEALLOCATE_PDU"
PDU_TYPE_NAME[16] = "ADDAGENTCAPS_PDU"
PDU_TYPE_NAME[17] = "REMOVEAGENTCAPS_PDU"
PDU_TYPE_NAME[18] = "RESPONSE_PDU"
TYPE_INTEGER = 2
TYPE_OCTETSTRING = 4
TYPE_NULL = 5
TYPE_OBJECTIDENTIFIER = 6
TYPE_IPADDRESS = 64
TYPE_COUNTER32 = 65
TYPE_GAUGE32 = 66
TYPE_TIMETICKS = 67
TYPE_OPAQUE = 68
TYPE_COUNTER64 = 70
TYPE_NOSUCHOBJECT = 128
TYPE_NOSUCHINSTANCE = 129
TYPE_ENDOFMIBVIEW = 130
TYPE_NAME = {}
TYPE_NAME[2] = "INTEGER"
TYPE_NAME[4] = "OCTETSTRING"
TYPE_NAME[5] = "NULL"
TYPE_NAME[6] = "OBJECTIDENTIFIER"
TYPE_NAME[64] = "IPADDRESS"
TYPE_NAME[65] = "COUNTER32"
TYPE_NAME[66] = "GAUGE32"
TYPE_NAME[67] = "TIMETICKS"
TYPE_NAME[68] = "OPAQUE"
TYPE_NAME[70] = "COUNTER64"
TYPE_NAME[128] = "NOSUCHOBJECT"
TYPE_NAME[129] = "NOSUCHINSTANCE"
TYPE_NAME[130] = "ENDOFMIBVIEW"
ERROR_NOAGENTXERROR = 0
ERROR_GENERR = 5
ERROR_NOACCESS = 6
ERROR_WRONGTYPE = 7
ERROR_WRONGLENGTH = 8
ERROR_WRONGENCODING = 9
ERROR_WRONGVALUE = 10
ERROR_NOCREATION = 11
ERROR_INCONSISTENTVALUE = 12
ERROR_RESOURCEUNAVAILABLE = 13
ERROR_COMMITFAILED = 14
ERROR_UNDOFAILED = 15
ERROR_NOTWRITABLE = 17
ERROR_INCONSISTENTNAME = 18
ERROR_OPENFAILED = 256
ERROR_NOTOPEN = 257
ERROR_INDEXWRONGTYPE = 258
ERROR_INDEXALREADYALLOCATED = 259
ERROR_INDEXNONEAVAILABLE = 260
ERROR_INDEXNOTALLOCATED = 261
ERROR_UNSUPPORTEDCONTEXT = 262
ERROR_DUPLICATEREGISTRATION = 263
ERROR_UNKNOWNREGISTRATION = 264
ERROR_UNKNOWNAGENTCAPS = 265
ERROR_PARSEERROR = 266
ERROR_REQUESTDENIED = 267
ERROR_PROCESSINGERROR = 268
ERROR_NAMES = {}
ERROR_NAMES[0] = "NOAGENTXERROR"
ERROR_NAMES[5] = "GENERR"
ERROR_NAMES[6] = "NOACCESS"
ERROR_NAMES[7] = "WRONGTYPE"
ERROR_NAMES[8] = "WRONGLENGTH"
ERROR_NAMES[9] = "WRONGENCODING"
ERROR_NAMES[10] = "WRONGVALUE"
ERROR_NAMES[11] = "NOCREATION"
ERROR_NAMES[12] = "INCONSISTENTVALUE"
ERROR_NAMES[13] = "RESOURCEUNAVAILABLE"
ERROR_NAMES[14] = "COMMITFAILED"
ERROR_NAMES[15] = "UNDOFAILED"
ERROR_NAMES[17] = "NOTWRITABLE"
ERROR_NAMES[18] = "INCONSISTENTNAME"
ERROR_NAMES[256] = "OPENFAILED"
ERROR_NAMES[257] = "NOTOPEN"
ERROR_NAMES[258] = "INDEXWRONGTYPE"
ERROR_NAMES[259] = "INDEXALREADYALLOCATED"
ERROR_NAMES[260] = "INDEXNONEAVAILABLE"
ERROR_NAMES[261] = "INDEXNOTALLOCATED"
ERROR_NAMES[262] = "UNSUPPORTEDCONTEXT"
ERROR_NAMES[263] = "DUPLICATEREGISTRATION"
ERROR_NAMES[264] = "UNKNOWNREGISTRATION"
ERROR_NAMES[265] = "UNKNOWNAGENTCAPS"
ERROR_NAMES[266] = "PARSEERROR"
ERROR_NAMES[267] = "REQUESTDENIED"
ERROR_NAMES[268] = "PROCESSINGERROR"
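As a small usage sketch of the module above: `setup_logging()` attaches a stream
handler to the `pyagentx` logger, and the lookup tables translate protocol
constants back into readable names, which is how the rest of the code produces its
log output:
```
import pyagentx

# Enable verbose logging for all pyagentx.* loggers.
pyagentx.setup_logging(debug=True)

# The tables above map wire values to human-readable names.
print(pyagentx.PDU_TYPE_NAME[pyagentx.AGENTX_RESPONSE_PDU])  # RESPONSE_PDU
print(pyagentx.TYPE_NAME[pyagentx.TYPE_COUNTER64])           # COUNTER64
print(pyagentx.ERROR_NAMES[pyagentx.ERROR_NOTWRITABLE])      # NOTWRITABLE
```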

96
pyagentx/agent.py Normal file

@ -0,0 +1,96 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
)
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.agent')
logger.addHandler(NullHandler())
# --------------------------------------------
import time
import inspect
try:
import queue
except ImportError:
import Queue as queue
import pyagentx
from pyagentx.updater import Updater
from pyagentx.network import Network
class AgentError(Exception):
pass
class Agent(object):
def __init__(self):
self._updater_list = []
self._sethandlers = {}
self._threads = []
def register(self, oid, class_, freq=10):
if Updater not in inspect.getmro(class_):
raise AgentError('Class given isn\'t an updater')
# cleanup and test oid
try:
oid = oid.strip(' .')
[int(i) for i in oid.split('.')]
except ValueError:
raise AgentError('OID isn\'t valid')
self._updater_list.append({'oid': oid, 'class': class_, 'freq': freq})
def register_set(self, oid, class_):
if pyagentx.SetHandler not in class_.__bases__:
raise AgentError('Class given isn\'t a SetHandler')
# cleanup and test oid
try:
oid = oid.strip(' .')
[int(i) for i in oid.split('.')]
except ValueError:
raise AgentError('OID isn\'t valid')
self._sethandlers[oid] = class_()
def setup(self):
# Override this
pass
def start(self):
update_queue = queue.Queue(maxsize=20)
self.setup()
# Start Updaters
for u in self._updater_list:
logger.debug('Starting updater [%s]' % u['oid'])
t = u['class']()
t.agent_setup(update_queue, u['oid'], u['freq'])
t.start()
self._threads.append(t)
# Start Network
oid_list = [u['oid'] for u in self._updater_list]
t = Network(update_queue, oid_list, self._sethandlers)
t.start()
self._threads.append(t)
# Do nothing ... just wait for someone to stop you
while True:
#logger.debug('Agent Sleeping ...')
time.sleep(1)
def stop(self):
logger.debug('Stop threads')
for t in self._threads:
t.stop.set()
logger.debug('Wait for updater')
for t in self._threads:
t.join()
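A minimal sketch of how `Agent` is meant to be subclassed (the same pattern
`vpp-snmp-agent.py` uses further down): `setup()` is the override point for
`register()`, `start()` spawns the Updater and Network threads and then blocks,
and `register()` rejects anything that is not an `Updater` subclass or a
dotted-numeric OID. The OID and class names here are hypothetical:
```
import pyagentx

class ExampleUpdater(pyagentx.Updater):
    def update(self):
        # Sub-OID '1.0' is appended to the registered base OID by Network.
        self.set_GAUGE32('1.0', 42)

class ExampleAgent(pyagentx.Agent):
    def setup(self):
        # freq is the polling interval (seconds) handed to the Updater thread.
        self.register('1.3.6.1.3.9999.1', ExampleUpdater, freq=10)

if __name__ == '__main__':
    pyagentx.setup_logging(debug=True)
    agent = ExampleAgent()
    try:
        agent.start()        # blocks; Updater and Network run alongside it
    except KeyboardInterrupt:
        agent.stop()
```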

268
pyagentx/network.py Normal file

@ -0,0 +1,268 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
)
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.network')
logger.addHandler(NullHandler())
# --------------------------------------------
import socket
import time
import threading
try:
import queue
except ImportError:
import Queue as queue
import pyagentx
from pyagentx.pdu import PDU
class Network(threading.Thread):
def __init__(self, update_queue, oid_list, sethandlers):
threading.Thread.__init__(self)
self.stop = threading.Event()
self._queue = update_queue
self._oid_list = oid_list
self._sethandlers = sethandlers
self.session_id = 0
self.transaction_id = 0
self.debug = 1
# Data Related Variables
self.data = {}
self.data_idx = []
def _connect(self):
while True:
try:
# self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# self.socket.connect(pyagentx.SOCKET_PATH)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(("localhost",705))
self.socket.settimeout(0.1)
return
except socket.error:
logger.error("Failed to connect, sleeping and retrying later")
time.sleep(2)
def new_pdu(self, type):
pdu = PDU(type)
pdu.session_id = self.session_id
pdu.transaction_id = self.transaction_id
self.transaction_id += 1
return pdu
def response_pdu(self, org_pdu):
pdu = PDU(pyagentx.AGENTX_RESPONSE_PDU)
pdu.session_id = org_pdu.session_id
pdu.transaction_id = org_pdu.transaction_id
pdu.packet_id = org_pdu.packet_id
return pdu
def send_pdu(self, pdu):
if self.debug: pdu.dump()
self.socket.send(pdu.encode())
def recv_pdu(self):
buf = self.socket.recv(1024)
if not buf: return None
pdu = PDU()
pdu.decode(buf)
if self.debug: pdu.dump()
return pdu
# =========================================
def _get_updates(self):
while True:
try:
item = self._queue.get_nowait()
logger.debug('New update')
update_oid = item['oid']
update_data = item['data']
# clear values with prefix oid
for oid in list(self.data.keys()):
if oid.startswith(update_oid):
del (self.data[oid])
# insert updated value
for row in update_data.values():
oid = "%s.%s" % (update_oid, row['name'])
self.data[oid] = {
'name': oid,
'type': row['type'],
'value': row['value']
}
# recalculate reverse index if data changed
self.data_idx = sorted(
self.data.keys(),
key=lambda k: tuple(int(part) for part in k.split('.')))
except queue.Empty:
break
def _get_next_oid(self, oid, endoid):
if oid in self.data:
# Exact match found
#logger.debug('get_next_oid, exact match of %s' % oid)
idx = self.data_idx.index(oid)
if idx == (len(self.data_idx) - 1):
# Last Item in MIB, No match!
return None
return self.data_idx[idx + 1]
else:
# No exact match, find prefix
#logger.debug('get_next_oid, no exact match of %s' % oid)
slist = oid.split('.')
elist = endoid.split('.')
for tmp_oid in self.data_idx:
tlist = tmp_oid.split('.')
for i in range(len(tlist)):
try:
sok = int(slist[i]) <= int(tlist[i])
eok = int(elist[i]) >= int(tlist[i])
if not (sok and eok):
break
except IndexError:
pass
if sok and eok:
return tmp_oid
return None # No match!
def start(self):
while True:
try:
self._start_network()
except socket.error:
logger.error("Network error, master disconnect?!")
def _start_network(self):
self._connect()
logger.info("==== Open PDU ====")
pdu = self.new_pdu(pyagentx.AGENTX_OPEN_PDU)
self.send_pdu(pdu)
pdu = self.recv_pdu()
self.session_id = pdu.session_id
logger.info("==== Ping PDU ====")
pdu = self.new_pdu(pyagentx.AGENTX_PING_PDU)
self.send_pdu(pdu)
pdu = self.recv_pdu()
logger.info("==== Register PDU ====")
for oid in self._oid_list:
logger.info("Registering: %s" % (oid))
pdu = self.new_pdu(pyagentx.AGENTX_REGISTER_PDU)
pdu.oid = oid
self.send_pdu(pdu)
pdu = self.recv_pdu()
logger.info("==== Waiting for PDU ====")
while True:
try:
self._get_updates()
request = self.recv_pdu()
except socket.timeout:
continue
if not request:
logger.error("Empty PDU, connection closed!")
raise socket.error
response = self.response_pdu(request)
if request.type == pyagentx.AGENTX_GET_PDU:
logger.debug("Received GET PDU")
for rvalue in request.range_list:
oid = rvalue[0]
logger.debug("OID: %s" % (oid))
if oid in self.data:
logger.debug("OID Found")
response.values.append(self.data[oid])
else:
logger.debug("OID Not Found!")
response.values.append({
'type': pyagentx.TYPE_NOSUCHOBJECT,
'name': rvalue[0],
'value': 0
})
elif request.type == pyagentx.AGENTX_GETNEXT_PDU:
logger.debug("Received GET_NEXT PDU")
for rvalue in request.range_list:
oid = self._get_next_oid(rvalue[0], rvalue[1])
logger.debug("GET_NEXT: %s => %s" % (rvalue[0], oid))
if oid:
response.values.append(self.data[oid])
else:
response.values.append({
'type': pyagentx.TYPE_ENDOFMIBVIEW,
'name': rvalue[0],
'value': 0
})
elif request.type == pyagentx.AGENTX_TESTSET_PDU:
logger.info("Received TESTSET PDU")
idx = 0
for row in request.values:
idx += 1
oid = row['name']
type_ = pyagentx.TYPE_NAME.get(row['type'], 'Unknown type')
value = row['data']
logger.info("Name: [%s] Type: [%s] Value: [%s]" %
(oid, type_, value))
# Find matching sethandler
matching_oid = ''
for target_oid in self._sethandlers:
if oid.startswith(target_oid):
matching_oid = target_oid
break
if matching_oid == '':
logger.debug(
'TestSet request failed: not writeable #%s' % idx)
response.error = pyagentx.ERROR_NOTWRITABLE
response.error_index = idx
break
try:
self._sethandlers[matching_oid].network_test(
request.session_id, request.transaction_id, oid,
row['data'])
except pyagentx.SetHandlerError:
logger.debug(
'TestSet request failed: wrong value #%s' % idx)
response.error = pyagentx.ERROR_WRONGVALUE
response.error_index = idx
break
logger.debug('TestSet request passed')
elif request.type == pyagentx.AGENTX_COMMITSET_PDU:
for handler in self._sethandlers.values():
handler.network_commit(request.session_id,
request.transaction_id)
logger.info("Received COMMITSET PDU")
elif request.type == pyagentx.AGENTX_UNDOSET_PDU:
for handler in self._sethandlers.values():
handler.network_undo(request.session_id,
request.transaction_id)
logger.info("Received UNDOSET PDU")
elif request.type == pyagentx.AGENTX_CLEANUPSET_PDU:
for handler in self._sethandlers.values():
handler.network_cleanup(request.session_id,
request.transaction_id)
logger.info("Received CLEANUP PDU")
self.send_pdu(response)
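One detail worth calling out in `_get_updates()` above: the GETNEXT walk in
`_get_next_oid()` depends on the OID index being sorted numerically per
sub-identifier, not as plain strings. A small sketch of the difference:
```
oids = ['1.3.6.1.2.1.31.1.1.1.10.2', '1.3.6.1.2.1.31.1.1.1.2.1']

# A plain string sort would walk ...1.10.2 before ...1.2.1, breaking GETNEXT:
print(sorted(oids))

# Sorting on tuples of ints, as self.data_idx does, yields the correct order:
print(sorted(oids, key=lambda k: tuple(int(part) for part in k.split('.'))))
```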

313
pyagentx/pdu.py Normal file

@ -0,0 +1,313 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
)
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.pdu')
logger.addHandler(NullHandler())
# --------------------------------------------
import struct
import pprint
import pyagentx
class PDU(object):
def __init__(self, type=0):
self.type = type
self.session_id = 0
self.transaction_id = 0
self.packet_id = 0
self.error = pyagentx.ERROR_NOAGENTXERROR
self.error_index = 0
self.decode_buf = ''
self.state = {}
self.values = []
def dump(self):
name = pyagentx.PDU_TYPE_NAME[self.type]
logger.debug('PDU DUMP: New PDU')
logger.debug(
'PDU DUMP: Meta : [%s: %d %d %d]' %
(name, self.session_id, self.transaction_id, self.packet_id))
if 'payload_length' in self.state:
logger.debug('PDU DUMP: Length : %s' %
self.state['payload_length'])
if hasattr(self, 'response'):
logger.debug('PDU DUMP: Response : %s' % self.response)
if hasattr(self, 'values'):
logger.debug('PDU DUMP: Values : %s' %
pprint.pformat(self.values))
if hasattr(self, 'range_list'):
logger.debug('PDU DUMP: Range list: %s' %
pprint.pformat(self.range_list))
# ====================================================
# encode functions
def encode_oid(self, oid, include=0):
oid = oid.strip()
oid = oid.split('.')
oid = [int(i) for i in oid]
if len(oid) > 5 and oid[:4] == [1, 3, 6, 1]:
# prefix
prefix = oid[4]
oid = oid[5:]
else:
# no prefix
prefix = 0
buf = struct.pack('BBBB', len(oid), prefix, include, 0)
for i in range(len(oid)):
buf += struct.pack('!L', oid[i])
return buf
def encode_octet(self, octet):
octet = octet.encode("utf-8")
buf = struct.pack('!L', len(octet))
buf += octet
padding = (4 - (len(octet) % 4)) % 4
buf += chr(0).encode() * padding
return buf
def encode_value(self, type, name, value):
buf = struct.pack('!HH', type, 0)
buf += self.encode_oid(name)
if type in [pyagentx.TYPE_INTEGER]:
buf += struct.pack('!l', value)
elif type in [
pyagentx.TYPE_COUNTER32, pyagentx.TYPE_GAUGE32,
pyagentx.TYPE_TIMETICKS
]:
buf += struct.pack('!L', value)
elif type in [pyagentx.TYPE_COUNTER64]:
buf += struct.pack('!Q', value)
elif type in [pyagentx.TYPE_OBJECTIDENTIFIER]:
buf += self.encode_oid(value)
elif type in [
pyagentx.TYPE_IPADDRESS, pyagentx.TYPE_OPAQUE,
pyagentx.TYPE_OCTETSTRING
]:
buf += self.encode_octet(value)
elif type in [
pyagentx.TYPE_NULL, pyagentx.TYPE_NOSUCHOBJECT,
pyagentx.TYPE_NOSUCHINSTANCE, pyagentx.TYPE_ENDOFMIBVIEW
]:
# No data
pass
else:
logger.error('Unknown Type: %s' % type)
return buf
def encode_header(self, pdu_type, payload_length=0, flags=0):
flags = flags | 0x10 # Bit 5 = all ints in NETWORK_BYTE_ORDER
buf = struct.pack('BBBB', 1, pdu_type, flags, 0)
buf += struct.pack('!L', self.session_id) # sessionID
buf += struct.pack('!L', self.transaction_id) # transactionID
buf += struct.pack('!L', self.packet_id) # packetID
buf += struct.pack('!L', payload_length)
return buf
def encode(self):
buf = b''
if self.type == pyagentx.AGENTX_OPEN_PDU:
# timeout
buf += struct.pack('!BBBB', 5, 0, 0, 0)
# agent OID
buf += struct.pack('!L', 0)
# Agent Desc
buf += self.encode_octet('MyAgent')
elif self.type == pyagentx.AGENTX_PING_PDU:
# No extra data
pass
elif self.type == pyagentx.AGENTX_REGISTER_PDU:
range_subid = 0
timeout = 5
priority = 127
buf += struct.pack('BBBB', timeout, priority, range_subid, 0)
# Sub Tree
buf += self.encode_oid(self.oid)
elif self.type == pyagentx.AGENTX_RESPONSE_PDU:
buf += struct.pack('!LHH', 0, self.error, self.error_index)
for value in self.values:
buf += self.encode_value(value['type'], value['name'],
value['value'])
else:
# Unsupported PDU type
pass
return self.encode_header(self.type, len(buf)) + buf
# ====================================================
# decode functions
def set_decode_buf(self, buf):
self.decode_buf = buf
def decode_oid(self):
try:
t = struct.unpack('!BBBB', self.decode_buf[:4])
self.decode_buf = self.decode_buf[4:]
ret = {
'n_subid': t[0],
'prefix': t[1],
'include': t[2],
'reserved': t[3],
}
sub_ids = []
if ret['prefix']:
sub_ids = [1, 3, 6, 1]
sub_ids.append(ret['prefix'])
for i in range(ret['n_subid']):
t = struct.unpack('!L', self.decode_buf[:4])
self.decode_buf = self.decode_buf[4:]
sub_ids.append(t[0])
oid = '.'.join(str(i) for i in sub_ids)
return oid, ret['include']
except Exception as e:
logger.exception('Invalid packing OID header')
logger.debug('%s' % pprint.pformat(self.decode_buf))
def decode_search_range(self):
start_oid, include = self.decode_oid()
if start_oid == []:
return [], [], 0
end_oid, _ = self.decode_oid()
return start_oid, end_oid, include
def decode_search_range_list(self):
range_list = []
while len(self.decode_buf):
range_list.append(self.decode_search_range())
return range_list
def decode_octet(self):
try:
t = struct.unpack('!L', self.decode_buf[:4])
l = t[0]
self.decode_buf = self.decode_buf[4:]
            padding = (4 - (l % 4)) % 4
buf = self.decode_buf[:l]
self.decode_buf = self.decode_buf[l + padding:]
return buf
except Exception as e:
logger.exception('Invalid packing octet header')
def decode_value(self):
try:
vtype, _ = struct.unpack('!HH', self.decode_buf[:4])
self.decode_buf = self.decode_buf[4:]
except Exception as e:
logger.exception('Invalid packing value header')
oid, _ = self.decode_oid()
if vtype in [
pyagentx.TYPE_INTEGER, pyagentx.TYPE_COUNTER32,
pyagentx.TYPE_GAUGE32, pyagentx.TYPE_TIMETICKS
]:
data = struct.unpack('!L', self.decode_buf[:4])
data = data[0]
self.decode_buf = self.decode_buf[4:]
elif vtype in [pyagentx.TYPE_COUNTER64]:
data = struct.unpack('!Q', self.decode_buf[:8])
data = data[0]
self.decode_buf = self.decode_buf[8:]
elif vtype in [pyagentx.TYPE_OBJECTIDENTIFIER]:
data, _ = self.decode_oid()
elif vtype in [
pyagentx.TYPE_IPADDRESS, pyagentx.TYPE_OPAQUE,
pyagentx.TYPE_OCTETSTRING
]:
data = self.decode_octet()
elif vtype in [
pyagentx.TYPE_NULL, pyagentx.TYPE_NOSUCHOBJECT,
pyagentx.TYPE_NOSUCHINSTANCE, pyagentx.TYPE_ENDOFMIBVIEW
]:
# No data
data = None
else:
logger.error('Unknown Type: %s' % vtype)
return {'type': vtype, 'name': oid, 'data': data}
def decode_header(self):
try:
t = struct.unpack('!BBBBLLLL', self.decode_buf[:20])
self.decode_buf = self.decode_buf[20:]
ret = {
'version': t[0],
'pdu_type': t[1],
'pdu_type_name': pyagentx.PDU_TYPE_NAME[t[1]],
'flags': t[2],
'reserved': t[3],
'session_id': t[4],
'transaction_id': t[5],
'packet_id': t[6],
'payload_length': t[7],
}
self.state = ret
self.type = ret['pdu_type']
self.session_id = ret['session_id']
self.packet_id = ret['packet_id']
self.transaction_id = ret['transaction_id']
self.decode_buf = self.decode_buf[:ret['payload_length']]
if ret['flags'] & 0x08: # content present
context = self.decode_octet()
logger.debug('Context: %s' % context)
return ret
except Exception as e:
logger.exception('Invalid packing: %d' % len(self.decode_buf))
logger.debug('%s' % pprint.pformat(self.decode_buf))
def decode(self, buf):
self.set_decode_buf(buf)
ret = self.decode_header()
if ret['pdu_type'] == pyagentx.AGENTX_RESPONSE_PDU:
# Decode Response Header
t = struct.unpack('!LHH', self.decode_buf[:8])
self.decode_buf = self.decode_buf[8:]
self.response = {
'sysUpTime': t[0],
'error': t[1],
'error_name': pyagentx.ERROR_NAMES[t[1]],
'index': t[2],
}
# Decode VarBindList
self.values = []
while len(self.decode_buf):
self.values.append(self.decode_value())
elif ret['pdu_type'] == pyagentx.AGENTX_GET_PDU:
self.range_list = self.decode_search_range_list()
elif ret['pdu_type'] == pyagentx.AGENTX_GETNEXT_PDU:
self.range_list = self.decode_search_range_list()
elif ret['pdu_type'] == pyagentx.AGENTX_TESTSET_PDU:
# Decode VarBindList
self.values = []
while len(self.decode_buf):
self.values.append(self.decode_value())
elif ret['pdu_type'] in [
pyagentx.AGENTX_COMMITSET_PDU, pyagentx.AGENTX_UNDOSET_PDU,
pyagentx.AGENTX_CLEANUPSET_PDU
]:
pass
else:
pdu_type_str = pyagentx.PDU_TYPE_NAME.get(
ret['pdu_type'], 'Unknown:' + str(ret['pdu_type']))
logger.error('Unsupported PDU type:' + pdu_type_str)
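A rough round-trip sketch of the encoder and decoder above: build a RESPONSE PDU
carrying one Counter64 varbind and decode it again with the same class (note that
`encode()` reads the value under the key `value` while `decode_value()` returns it
under `data`):
```
import pyagentx
from pyagentx.pdu import PDU

pdu = PDU(pyagentx.AGENTX_RESPONSE_PDU)
pdu.session_id = 7
pdu.values.append({
    'type': pyagentx.TYPE_COUNTER64,
    'name': '1.3.6.1.2.1.31.1.1.1.6.1',   # ifHCInOctets.1
    'value': 1234567890,
})
wire = pdu.encode()

decoded = PDU()
decoded.decode(wire)
print(decoded.session_id)            # 7
print(decoded.response['error'])     # 0 (NOAGENTXERROR)
print(decoded.values[0]['data'])     # 1234567890
```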

67
pyagentx/sethandler.py Normal file

@ -0,0 +1,67 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
)
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.sethandler')
logger.addHandler(NullHandler())
# --------------------------------------------
class SetHandlerError(Exception):
pass
class SetHandler(object):
def __init__(self):
self.transactions = {}
def network_test(self, session_id, transaction_id, oid, data):
tid = "%s_%s" % (session_id, transaction_id)
if tid in self.transactions:
del (self.transactions[tid])
try:
self.test(oid, data)
self.transactions[tid] = oid, data
        except SetHandlerError as e:
logger.error('TestSet failed')
raise e
def network_commit(self, session_id, transaction_id):
tid = "%s_%s" % (session_id, transaction_id)
try:
oid, data = self.transactions[tid]
self.commit(oid, data)
if tid in self.transactions:
del (self.transactions[tid])
except:
logger.error('CommitSet failed')
def network_undo(self, session_id, transaction_id):
tid = "%s_%s" % (session_id, transaction_id)
if tid in self.transactions:
del (self.transactions[tid])
def network_cleanup(self, session_id, transaction_id):
tid = "%s_%s" % (session_id, transaction_id)
if tid in self.transactions:
del (self.transactions[tid])
# User override these
def test(self, oid, data):
pass
def commit(self, oid, data):
pass
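The class above is only exercised when an agent registers a writable subtree via
`Agent.register_set()`; the VPP agent further down is read-only and does not use
it. A hypothetical handler (OID and class name made up for illustration) would
look like this:
```
import pyagentx

class LocationSetHandler(pyagentx.SetHandler):
    def test(self, oid, data):
        # Raising SetHandlerError makes Network answer the TESTSET with WRONGVALUE.
        if not data:
            raise pyagentx.SetHandlerError()

    def commit(self, oid, data):
        print('committing %s = %s' % (oid, data))

# Wired into an agent with:
#   self.register_set('1.3.6.1.3.9999.2', LocationSetHandler)
```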

137
pyagentx/updater.py Normal file

@ -0,0 +1,137 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
)
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.updater')
logger.addHandler(NullHandler())
# --------------------------------------------
import time
import threading
try:
import queue
except ImportError:
import Queue as queue
import pyagentx
class Updater(threading.Thread):
def agent_setup(self, queue, oid, freq):
self.stop = threading.Event()
self._queue = queue
self._oid = oid
self._freq = freq
self._data = {}
def run(self):
start_time = 0
while True:
if self.stop.is_set(): break
now = time.time()
if now - start_time > self._freq:
logger.info('Updating : %s (%s)' %
(self.__class__.__name__, self._oid))
start_time = now
self._data = {}
try:
self.update()
self._queue.put_nowait({
'oid': self._oid,
'data': self._data
})
except queue.Full:
logger.error('Queue full')
except:
logger.exception('Unhandled update exception')
time.sleep(0.1)
logger.info('Updater stopping')
# Override this
def update(self):
pass
def set_INTEGER(self, oid, value):
logger.debug('Setting INTEGER %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_INTEGER,
'value': value
}
def set_OCTETSTRING(self, oid, value):
logger.debug('Setting OCTETSTRING %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_OCTETSTRING,
'value': value
}
def set_OBJECTIDENTIFIER(self, oid, value):
logger.debug('Setting OBJECTIDENTIFIER %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_OBJECTIDENTIFIER,
'value': value
}
def set_IPADDRESS(self, oid, value):
logger.debug('Setting IPADDRESS %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_IPADDRESS,
'value': value
}
def set_COUNTER32(self, oid, value):
logger.debug('Setting COUNTER32 %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_COUNTER32,
'value': value
}
def set_GAUGE32(self, oid, value):
logger.debug('Setting GAUGE32 %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_GAUGE32,
'value': value
}
def set_TIMETICKS(self, oid, value):
logger.debug('Setting TIMETICKS %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_TIMETICKS,
'value': value
}
def set_OPAQUE(self, oid, value):
logger.debug('Setting OPAQUE %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_OPAQUE,
'value': value
}
def set_COUNTER64(self, oid, value):
logger.debug('Setting COUNTER64 %s = %s' % (oid, value))
self._data[oid] = {
'name': oid,
'type': pyagentx.TYPE_COUNTER64,
'value': value
}
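A quick sketch of what the `set_*` helpers above record: each call stores a row
keyed by its sub-OID, which `Network._get_updates()` later prefixes with the base
OID the updater was registered under (class name and OIDs here are illustrative):
```
import pyagentx

class Demo(pyagentx.Updater):
    def update(self):
        self.set_OCTETSTRING('1.1', 'GigabitEthernet0/0/0')
        self.set_COUNTER64('6.1', 1234567890)

d = Demo()
d.agent_setup(None, '1.3.6.1.2.1.31.1.1.1', 10)  # queue, base OID, frequency
d.update()
print(d._data['6.1'])
# {'name': '6.1', 'type': 70, 'value': 1234567890}, exported as
# 1.3.6.1.2.1.31.1.1.1.6.1 once Network merges it under the base OID.
```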

13
snmpd-dataplane.service Normal file

@ -0,0 +1,13 @@
[Unit]
Description=Simple Network Management Protocol (SNMP) Daemon.
After=network.target
ConditionPathExists=/etc/snmp/snmpd.conf
[Service]
Type=simple
ExecStartPre=/bin/mkdir -p /var/run/agentx-dataplane/
ExecStart=/sbin/ip netns exec dataplane /usr/sbin/snmpd -LOw -u Debian-snmp -g vpp -I -smux,mteTrigger,mteTriggerConf -f -p /run/snmpd-dataplane.pid
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target

317
vpp-snmp-agent.py Executable file

@ -0,0 +1,317 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from vppstats import VPPStats
import time
import pyagentx
import logging
import threading
vppstat_lastread = 0
vppstat_ifstat = {
'ifNames': [],
'ifHCInOctets': [],
'ifHCInUcastPkts': [],
'ifHCInMulticastPkts': [],
'ifHCInBroadcastPkts': [],
'ifHCOutOctets': [],
'ifHCOutUcastPkts': [],
'ifHCOutMulticastPkts': [],
'ifHCOutBroadcastPkts': [],
'ifHighSpeed': []
}
vppstat_lock = threading.Lock()
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('pyagentx.vpp')
logger.addHandler(NullHandler())
def vppstat_update():
global vppstat_lastread, vppstat_lock, vppstat_ifstat, logger
vppstat_lock.acquire()
try:
if time.time() - vppstat_lastread < 9.0:
logger.debug("Skipping, cache still fresh")
vppstat_lock.release()
return
logger.info("Fetching interface data from VPP")
vppstat = VPPStats(socketname='/run/vpp/stats.sock', timeout=2)
vppstat.connect()
vppstat_ifstat['ifNames'] = vppstat['/if/names']
vppstat_ifstat['ifHCInOctets'].clear()
vppstat_ifstat['ifHCInUcastPkts'].clear()
vppstat_ifstat['ifHCInMulticastPkts'].clear()
vppstat_ifstat['ifHCInBroadcastPkts'].clear()
vppstat_ifstat['ifHCOutOctets'].clear()
vppstat_ifstat['ifHCOutUcastPkts'].clear()
vppstat_ifstat['ifHCOutMulticastPkts'].clear()
vppstat_ifstat['ifHCOutBroadcastPkts'].clear()
vppstat_ifstat['ifHighSpeed'].clear()
for i in range(len(vppstat_ifstat['ifNames'])):
vppstat_ifstat['ifHCInOctets'].append(
vppstat['/if/rx'][:, i].sum_octets())
vppstat_ifstat['ifHCInUcastPkts'].append(
vppstat['/if/rx'][:, i].sum_packets())
vppstat_ifstat['ifHCInMulticastPkts'].append(
vppstat['/if/rx-multicast'][:, i].sum_packets())
vppstat_ifstat['ifHCInBroadcastPkts'].append(
vppstat['/if/rx-broadcast'][:, i].sum_packets())
vppstat_ifstat['ifHCOutOctets'].append(
vppstat['/if/tx'][:, i].sum_octets())
vppstat_ifstat['ifHCOutUcastPkts'].append(
vppstat['/if/tx'][:, i].sum_packets())
vppstat_ifstat['ifHCOutMulticastPkts'].append(
vppstat['/if/tx-multicast'][:, i].sum_packets())
vppstat_ifstat['ifHCOutBroadcastPkts'].append(
vppstat['/if/tx-broadcast'][:, i].sum_packets())
# TODO(pim) retrieve from vpp_papi
# IF-MIB::ifHighSpeed.2 = Gauge32: 1000
vppstat_ifstat['ifHighSpeed'].append(1000)
# TODO(pim) retrieve from linux namespace if present
# IF-MIB::ifAlias.2 = STRING: Infra: nikhef-core-1.nl.switch.coloclue.net e1/34
# Initializing these to defaults:
# IF-MIB::ifPromiscuousMode.2 = INTEGER: false(2)
# IF-MIB::ifConnectorPresent.2 = INTEGER: true(1)
# IF-MIB::ifCounterDiscontinuityTime.2 = Timeticks: (0) 0:00:00.00
logger.info("Fetched data for %u interfaces" %
len(vppstat_ifstat['ifNames']))
vppstat_lastread = time.time()
vppstat.disconnect()
except Exception as e:
logger.error("Error occured, releasing lock: ", e)
vppstat_lock.release()
return
class ifName(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_OCTETSTRING(str(i + 1), vppstat_ifstat['ifNames'][i])
class ifAlias(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_OCTETSTRING(str(i + 1), vppstat_ifstat['ifNames'][i])
class ifInMulticastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER32(str(i + 1),
vppstat_ifstat['ifHCInMulticastPkts'][i])
class ifInBroadcastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER32(str(i + 1),
vppstat_ifstat['ifHCInBroadcastPkts'][i])
class ifOutMulticastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER32(str(i + 1),
vppstat_ifstat['ifHCOutMulticastPkts'][i])
class ifOutBroadcastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER32(str(i + 1),
vppstat_ifstat['ifHCOutBroadcastPkts'][i])
class ifHCInOctets(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1), vppstat_ifstat['ifHCInOctets'][i])
class ifHCInUcastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1),
vppstat_ifstat['ifHCInUcastPkts'][i])
class ifHCInMulticastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1),
vppstat_ifstat['ifHCInMulticastPkts'][i])
class ifHCInBroadcastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1),
vppstat_ifstat['ifHCInBroadcastPkts'][i])
class ifHCOutOctets(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1), vppstat_ifstat['ifHCOutOctets'][i])
class ifHCOutUcastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1),
vppstat_ifstat['ifHCOutUcastPkts'][i])
class ifHCOutMulticastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1),
vppstat_ifstat['ifHCOutMulticastPkts'][i])
class ifHCOutBroadcastPkts(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_COUNTER64(str(i + 1),
vppstat_ifstat['ifHCOutBroadcastPkts'][i])
class ifHighSpeed(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
self.set_GAUGE32(str(i + 1), vppstat_ifstat['ifHighSpeed'][i])
class ifPromiscuousMode(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
# Hardcode to false(2)
self.set_INTEGER(str(i + 1), 2)
class ifConnectorPresent(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
# Hardcode to true(1)
self.set_INTEGER(str(i + 1), 1)
class ifCounterDiscontinuityTime(pyagentx.Updater):
def update(self):
global vppstat_ifstat
vppstat_update()
for i in range(len(vppstat_ifstat['ifNames'])):
# Hardcode to Timeticks: (0) 0:00:00.00
self.set_TIMETICKS(str(i + 1), 0)
class MyAgent(pyagentx.Agent):
def setup(self):
self.register('1.3.6.1.2.1.31.1.1.1.1', ifName)
self.register('1.3.6.1.2.1.31.1.1.1.2', ifInMulticastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.3', ifInBroadcastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.4', ifOutMulticastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.5', ifOutBroadcastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.6', ifHCInOctets)
self.register('1.3.6.1.2.1.31.1.1.1.7', ifHCInUcastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.8', ifHCInMulticastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.9', ifHCInBroadcastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.10', ifHCOutOctets)
self.register('1.3.6.1.2.1.31.1.1.1.11', ifHCOutUcastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.12', ifHCOutMulticastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.13', ifHCOutBroadcastPkts)
self.register('1.3.6.1.2.1.31.1.1.1.14', ifHighSpeed)
self.register('1.3.6.1.2.1.31.1.1.1.15', ifPromiscuousMode)
self.register('1.3.6.1.2.1.31.1.1.1.16', ifConnectorPresent)
self.register('1.3.6.1.2.1.31.1.1.1.18', ifCounterDiscontinuityTime)
self.register('1.3.6.1.2.1.31.1.1.1.17', ifAlias)
def main():
pyagentx.setup_logging()
try:
a = MyAgent()
a.start()
except Exception as e:
print("Unhandled exception:", e)
a.stop()
except KeyboardInterrupt:
a.stop()
if __name__ == "__main__":
main()
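To make the addressing above concrete: every Updater class is registered on one
ifXTable column OID, and the sub-OID passed to `set_*` (the 1-based interface
index) is appended to it by the pyagentx Network thread, for example:
```
# ifHCInOctets is registered at 1.3.6.1.2.1.31.1.1.1.6, and each interface i
# (0-based in VPP's /if/names) is exported as instance str(i + 1) under it:
base = '1.3.6.1.2.1.31.1.1.1.6'
i = 1                                  # second interface in /if/names
print('%s.%s' % (base, i + 1))         # 1.3.6.1.2.1.31.1.1.1.6.2
```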

13
vpp-snmp-agent.service Normal file

@ -0,0 +1,13 @@
[Unit]
Description=SNMP AgentX Daemon for VPP dataplane statistics
After=network.target
ConditionPathExists=/etc/snmp/snmpd.conf
[Service]
Type=simple
ExecStart=/sbin/ip netns exec dataplane runuser -u Debian-snmp -- /usr/local/sbin/vpp-snmp-agent
Group=vpp
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target

40
vpp-snmp-agent.spec Normal file

@ -0,0 +1,40 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['vpp-snmp-agent.py'],
pathex=['/home/pim/src/vpp-snmp-agentx'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='vpp-snmp-agent',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )

491
vppstats.py Normal file

@ -0,0 +1,491 @@
'''
VPPStats is a shared memory implementation that exposes the VPP statseg as
an associative array. Counters can be accessed in either dimension.
stat['/if/rx'] - returns 2D lists
stat['/if/rx'][0] - returns counters for all interfaces for thread 0
stat['/if/rx'][0][1] - returns counter for interface 1 on thread 0
stat['/if/rx'][0][1]['packets'] - returns the packet counter
for interface 1 on thread 0
stat['/if/rx'][:, 1] - returns the counters for interface 1 on all threads
stat['/if/rx'][:, 1].packets() - returns the packet counters for
interface 1 on all threads
stat['/if/rx'][:, 1].sum_packets() - returns the sum of packet counters for
interface 1 on all threads
stat['/if/rx-miss'][:, 1].sum() - returns the sum of packet counters for
interface 1 on all threads for simple counters
Usage:
stat = VPPStats()
stat.connect()
for x in range(10):
print('version ', stat.version)
print('epoch ', stat.epoch)
print('/if/names', stat['/if/names'])
print('/if/rx[1].packets', stat['/if/rx'][:, 1].sum_packets())
print('/if/rx[1].octets', stat['/if/rx'][:, 1].sum_octets())
print('/if/tx[1].packets', stat['/if/tx'][:, 1].sum_packets())
print('/if/tx[1].octets', stat['/if/tx'][:, 1].sum_octets())
print("")
time.sleep(10)
stat.disconnect()
'''
import os
import socket
import array
import mmap
from struct import Struct
import time
import re
VEC_LEN_FMT = Struct('I')
def get_vec_len(stats, vector_offset):
'''Equivalent to VPP vec_len()'''
return VEC_LEN_FMT.unpack_from(stats.statseg, vector_offset - 8)[0]
def get_string(stats, ptr):
'''Get a string from a VPP vector'''
namevector = ptr - stats.base
namevectorlen = get_vec_len(stats, namevector)
if namevector + namevectorlen >= stats.size:
raise IOError('String overruns stats segment')
return stats.statseg[namevector:namevector + namevectorlen -
1].decode('ascii')
class StatsVector:
'''A class representing a VPP vector'''
def __init__(self, stats, ptr, fmt):
self.vec_start = ptr - stats.base
self.vec_len = get_vec_len(stats, ptr - stats.base)
self.struct = Struct(fmt)
self.fmtlen = len(fmt)
self.elementsize = self.struct.size
self.statseg = stats.statseg
self.stats = stats
if self.vec_start + self.vec_len * self.elementsize >= stats.size:
raise IOError('Vector overruns stats segment')
def __iter__(self):
with self.stats.lock:
return self.struct.iter_unpack(
self.statseg[self.vec_start:self.vec_start +
self.elementsize * self.vec_len])
def __getitem__(self, index):
        if index >= self.vec_len:
raise IOError('Index beyond end of vector')
with self.stats.lock:
if self.fmtlen == 1:
return self.struct.unpack_from(
self.statseg,
self.vec_start + (index * self.elementsize))[0]
return self.struct.unpack_from(
self.statseg, self.vec_start + (index * self.elementsize))
class VPPStats():
'''Main class implementing Python access to the VPP statistics segment'''
# pylint: disable=too-many-instance-attributes
shared_headerfmt = Struct('QPQQPP')
default_socketname = '/run/vpp/stats.sock'
def __init__(self, socketname=default_socketname, timeout=10):
self.socketname = socketname
self.timeout = timeout
self.directory = {}
self.lock = StatsLock(self)
self.connected = False
self.size = 0
self.last_epoch = 0
self.error_vectors = 0
self.statseg = 0
def connect(self):
'''Connect to stats segment'''
if self.connected:
return
sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
sock.connect(self.socketname)
# Get file descriptor for memory map
fds = array.array("i") # Array of ints
_, ancdata, _, _ = sock.recvmsg(0, socket.CMSG_LEN(4))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
fds.frombytes(cmsg_data[:len(cmsg_data) -
(len(cmsg_data) % fds.itemsize)])
mfd = list(fds)[0]
sock.close()
stat_result = os.fstat(mfd)
self.statseg = mmap.mmap(mfd, stat_result.st_size, mmap.PROT_READ,
mmap.MAP_SHARED)
os.close(mfd)
self.size = stat_result.st_size
if self.version != 2:
            raise Exception('Incompatible stat segment version {}'.format(
self.version))
self.refresh()
self.connected = True
def disconnect(self):
'''Disconnect from stats segment'''
if self.connected:
self.statseg.close()
self.connected = False
@property
def version(self):
'''Get version of stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[0]
@property
def base(self):
'''Get base pointer of stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[1]
@property
def epoch(self):
'''Get current epoch value from stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[2]
@property
def in_progress(self):
'''Get value of in_progress from stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[3]
@property
def directory_vector(self):
'''Get pointer of directory vector'''
return self.shared_headerfmt.unpack_from(self.statseg)[4]
@property
def error_vector(self):
'''Get pointer of error vector'''
return self.shared_headerfmt.unpack_from(self.statseg)[5]
elementfmt = 'IQ128s'
def refresh(self, blocking=True):
'''Refresh directory vector cache (epoch changed)'''
directory = {}
directory_by_idx = {}
while True:
try:
with self.lock:
self.last_epoch = self.epoch
for i, direntry in enumerate(
StatsVector(self, self.directory_vector,
self.elementfmt)):
path_raw = direntry[2].find(b'\x00')
path = direntry[2][:path_raw].decode('ascii')
directory[path] = StatsEntry(direntry[0], direntry[1])
directory_by_idx[i] = path
self.directory = directory
self.directory_by_idx = directory_by_idx
# Cache the error index vectors
self.error_vectors = []
for threads in StatsVector(self, self.error_vector, 'P'):
self.error_vectors.append(
StatsVector(self, threads[0], 'Q'))
return
except IOError:
if not blocking:
raise
def __getitem__(self, item, blocking=True):
if not self.connected:
self.connect()
while True:
try:
if self.last_epoch != self.epoch:
self.refresh(blocking)
with self.lock:
return self.directory[item].get_counter(self)
except IOError:
if not blocking:
raise
def __iter__(self):
return iter(self.directory.items())
def set_errors(self, blocking=True):
'''Return dictionary of error counters > 0'''
if not self.connected:
self.connect()
errors = {
k: v
for k, v in self.directory.items() if k.startswith("/err/")
}
result = {}
while True:
try:
if self.last_epoch != self.epoch:
self.refresh(blocking)
with self.lock:
for k, entry in errors.items():
total = 0
i = entry.value
for per_thread in self.error_vectors:
total += per_thread[i]
if total:
result[k] = total
return result
except IOError:
if not blocking:
raise
def set_errors_str(self, blocking=True):
'''Return all errors counters > 0 pretty printed'''
error_string = ['ERRORS:']
error_counters = self.set_errors(blocking)
for k in sorted(error_counters):
error_string.append('{:<60}{:>10}'.format(k, error_counters[k]))
return '%s\n' % '\n'.join(error_string)
def get_counter(self, name, blocking=True):
'''Alternative call to __getitem__'''
return self.__getitem__(name, blocking)
def get_err_counter(self, name, blocking=True):
'''Return a single value (sum of all threads)'''
if not self.connected:
self.connect()
if name.startswith("/err/"):
while True:
try:
if self.last_epoch != self.epoch:
self.refresh(blocking)
with self.lock:
return sum(self.directory[name].get_counter(self))
except IOError:
if not blocking:
raise
def ls(self, patterns):
'''Returns list of counters matching pattern'''
# pylint: disable=invalid-name
if not self.connected:
self.connect()
if not isinstance(patterns, list):
patterns = [patterns]
regex = [re.compile(i) for i in patterns]
return [
k for k, v in self.directory.items() if any(
re.match(pattern, k) for pattern in regex)
]
def dump(self, counters, blocking=True):
'''Given a list of counters return a dictionary of results'''
if not self.connected:
self.connect()
result = {}
for cnt in counters:
result[cnt] = self.__getitem__(cnt, blocking)
return result
class StatsLock():
'''Stat segment optimistic locking'''
def __init__(self, stats):
self.stats = stats
self.epoch = 0
def __enter__(self):
acquired = self.acquire(blocking=True)
assert acquired, "Lock wasn't acquired, but blocking=True"
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
self.release()
def acquire(self, blocking=True, timeout=-1):
'''Acquire the lock. Await in progress to go false. Record epoch.'''
self.epoch = self.stats.epoch
if timeout > 0:
start = time.monotonic()
while self.stats.in_progress:
if not blocking:
time.sleep(0.01)
if timeout > 0:
                if time.monotonic() - start > timeout:
return False
return True
def release(self):
'''Check if data read while locked is valid'''
if self.stats.in_progress or self.stats.epoch != self.epoch:
raise IOError('Optimistic lock failed, retry')
def locked(self):
'''Not used'''
class StatsCombinedList(list):
'''Column slicing for Combined counters list'''
def __getitem__(self, item):
'''Supports partial numpy style 2d support. Slice by column [:,1]'''
if isinstance(item, int):
return list.__getitem__(self, item)
return CombinedList([row[item[1]] for row in self])
class CombinedList(list):
'''Combined Counters 2-dimensional by thread by index of packets/octets'''
def packets(self):
'''Return column (2nd dimension). Packets for all threads'''
return [pair[0] for pair in self]
def octets(self):
'''Return column (2nd dimension). Octets for all threads'''
return [pair[1] for pair in self]
def sum_packets(self):
'''Return column (2nd dimension). Sum of all packets for all threads'''
return sum(self.packets())
def sum_octets(self):
'''Return column (2nd dimension). Sum of all octets for all threads'''
return sum(self.octets())
class StatsTuple(tuple):
'''A Combined vector tuple (packets, octets)'''
def __init__(self, data):
self.dictionary = {'packets': data[0], 'bytes': data[1]}
super().__init__()
def __repr__(self):
return dict.__repr__(self.dictionary)
def __getitem__(self, item):
if isinstance(item, int):
return tuple.__getitem__(self, item)
if item == 'packets':
return tuple.__getitem__(self, 0)
return tuple.__getitem__(self, 1)
class StatsSimpleList(list):
'''Simple Counters 2-dimensional by thread by index of packets'''
def __getitem__(self, item):
'''Supports partial numpy style 2d support. Slice by column [:,1]'''
if isinstance(item, int):
return list.__getitem__(self, item)
return SimpleList([row[item[1]] for row in self])
class SimpleList(list):
'''Simple counter'''
def sum(self):
'''Sum the vector'''
return sum(self)
class StatsEntry():
'''An individual stats entry'''
# pylint: disable=unused-argument,no-self-use
def __init__(self, stattype, statvalue):
self.type = stattype
self.value = statvalue
if stattype == 1:
self.function = self.scalar
elif stattype == 2:
self.function = self.simple
elif stattype == 3:
self.function = self.combined
elif stattype == 4:
self.function = self.error
elif stattype == 5:
self.function = self.name
elif stattype == 7:
self.function = self.symlink
else:
self.function = self.illegal
def illegal(self, stats):
'''Invalid or unknown counter type'''
return None
def scalar(self, stats):
'''Scalar counter'''
return self.value
def simple(self, stats):
'''Simple counter'''
counter = StatsSimpleList()
for threads in StatsVector(stats, self.value, 'P'):
clist = [v[0] for v in StatsVector(stats, threads[0], 'Q')]
counter.append(clist)
return counter
def combined(self, stats):
'''Combined counter'''
counter = StatsCombinedList()
for threads in StatsVector(stats, self.value, 'P'):
clist = [
StatsTuple(cnt) for cnt in StatsVector(stats, threads[0], 'QQ')
]
counter.append(clist)
return counter
def error(self, stats):
'''Error counter'''
counter = SimpleList()
for clist in stats.error_vectors:
counter.append(clist[self.value])
return counter
def name(self, stats):
'''Name counter'''
counter = []
for name in StatsVector(stats, self.value, 'P'):
if name[0]:
counter.append(get_string(stats, name[0]))
return counter
SYMLINK_FMT1 = Struct('II')
SYMLINK_FMT2 = Struct('Q')
def symlink(self, stats):
'''Symlink counter'''
b = self.SYMLINK_FMT2.pack(self.value)
index1, index2 = self.SYMLINK_FMT1.unpack(b)
name = stats.directory_by_idx[index1]
return stats[name][:, index2]
def get_counter(self, stats):
'''Return a list of counters'''
if stats:
return self.function(stats)
#
# stat = VPPStats(socketname='/run/vpp/stats.sock', timeout=2)
# stat.connect()
# print('version ', stat.version)
# print('epoch ', stat.epoch)
# print('/if/names', stat['/if/names'])
#
# for x in range(10):
# idx=2
# print('/if/rx[%s] packets %u octets %u' % (stat['/if/names'][idx], stat['/if/rx'][:, idx].sum_packets(), stat['/if/rx'][:, idx].sum_octets()))
# print('/if/tx[%s] packets %u octets %u' % (stat['/if/names'][idx], stat['/if/tx'][:, idx].sum_packets(), stat['/if/tx'][:, idx].sum_octets()))
# print("")
# time.sleep(10)
#
# stat.disconnect()