Format with black

Pim van Pelt
2023-01-08 13:05:42 +01:00
parent 72e9cf3503
commit 5e11539b44
8 changed files with 540 additions and 355 deletions
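For context: black normalizes string quotes to double quotes and re-wraps long calls, which accounts for most of the changes below. A minimal sketch of reproducing one such change programmatically, assuming the black package is installed (the input string is illustrative, not taken from this commit):

import black

# Black's default mode rewrites single-quoted strings to double quotes
# and wraps expressions that exceed the line-length limit.
src = "logger = logging.getLogger('agentx')\n"
print(black.format_str(src, mode=black.FileMode()))
# -> logger = logging.getLogger("agentx")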

View File

@@ -11,19 +11,23 @@ import logging
from agentx.agent import Agent
from agentx.dataset import DataSet
def setup_logging(debug=False):
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger = logging.getLogger('agentx')
logger = logging.getLogger("agentx")
logger.setLevel(level)
formatter = logging.Formatter('[%(levelname)-8s] %(name)17s - %(funcName)-15s: %(message)s')
formatter = logging.Formatter(
"[%(levelname)-8s] %(name)17s - %(funcName)-15s: %(message)s"
)
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
AGENTX_EMPTY_PDU = 1
AGENTX_OPEN_PDU = 1
AGENTX_CLOSE_PDU = 2

View File

@@ -12,14 +12,15 @@ import agentx
from agentx.dataset import DataSet
from agentx.network import Network
class NullHandler(logging.Handler):
def emit(self, record):
pass
class Agent(object):
def __init__(self, server_address='/var/agentx/master', period=30.0):
self.logger = logging.getLogger('agentx.agent')
def __init__(self, server_address="/var/agentx/master", period=30.0):
self.logger = logging.getLogger("agentx.agent")
self.logger.addHandler(NullHandler())
self._servingset = DataSet()
@@ -41,43 +42,44 @@ class Agent(object):
return True
def run(self):
self.logger.info('Calling setup')
self.logger.info("Calling setup")
if not self.setup():
self.logger.error('Setup failed - exiting')
self.logger.error("Setup failed - exiting")
return
self.logger.info('Initial update')
self.logger.info("Initial update")
self._update()
while True:
if not self._net.is_connected():
self.logger.info('Opening AgentX connection')
self.logger.info("Opening AgentX connection")
self._net.start(self._oid_list)
if time.time() - self._lastupdate > self._update_period:
if not self._update():
self.logger.warning('Update failed, last successful update was %s' % self._lastupdate)
self.logger.warning(
"Update failed, last successful update was %s"
% self._lastupdate
)
time.sleep(1)
try:
self._net.run()
except Exception as e:
self.logger.error('An exception occurred: %s' % e)
self.logger.error('Reconnecting')
self.logger.error("An exception occurred: %s" % e)
self.logger.error("Reconnecting")
self._net.disconnect()
time.sleep(0.1)
def stop(self):
self.logger.debug('Stopping')
self.logger.debug("Stopping")
self._net.disconnect()
pass
def setup(self):
# Override this
pass
def update(self):
# Override this
pass
@@ -88,5 +90,5 @@ class Agent(object):
for oid in oid_list:
if not oid in self._oid_list:
self.logger.debug('Adding %s to list' % oid)
self.logger.debug("Adding %s to list" % oid)
self._oid_list.append(oid)
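A usage note for the Agent class reformatted above: a minimal subclass sketch based only on the methods visible in this diff (setup(), update(), register(), run()); the OID, value and server address are illustrative:

import agentx

class ExampleAgent(agentx.Agent):
    def setup(self):
        # Register the OID subtree this agent will serve.
        self.register("1.3.6.1.2.1.2.2.1")
        return True

    def update(self):
        # Return a freshly populated DataSet every polling period.
        ds = agentx.DataSet()
        ds.set("1.3.6.1.2.1.2.2.1.1.1000", "int", 1000)
        return ds

ExampleAgent(server_address="localhost:705", period=30.0).run()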

View File

@@ -9,38 +9,36 @@ from __future__ import (
import time
import agentx
class DataSetError(Exception):
pass
class DataSet():
class DataSet:
def __init__(self):
self._data = {}
def set(self, oid, oid_type, value):
if oid_type.startswith('int'):
if oid_type.startswith("int"):
t = agentx.TYPE_INTEGER
elif oid_type.startswith('str'):
elif oid_type.startswith("str"):
t = agentx.TYPE_OCTETSTRING
elif oid_type.startswith('oid'):
elif oid_type.startswith("oid"):
t = agentx.TYPE_OBJECTIDENTIFIER
elif oid_type.startswith('ip'):
elif oid_type.startswith("ip"):
t = agentx.TYPE_IPADDRESS
elif oid_type == 'counter32' or oid_type == 'uint32' or oid_type == 'u32':
elif oid_type == "counter32" or oid_type == "uint32" or oid_type == "u32":
t = agentx.TYPE_COUNTER32
elif oid_type == 'gauge32':
elif oid_type == "gauge32":
t = agentx.TYPE_GAUGE32
elif oid_type.startswith('time') or oid_type.startswith('tick'):
elif oid_type.startswith("time") or oid_type.startswith("tick"):
t = agentx.TYPE_TIMETICKS
elif oid_type.startswith('opaque'):
elif oid_type.startswith("opaque"):
t = agentx.TYPE_OPAQUE
elif oid_type == 'counter64' or oid_type == 'uint64' or oid_type == 'u64':
elif oid_type == "counter64" or oid_type == "uint64" or oid_type == "u64":
t = agentx.TYPE_COUNTER64
else:
raise DataSetError('Invalid oid_type: %s' % (oid_type))
raise DataSetError("Invalid oid_type: %s" % (oid_type))
return
self._data[oid] = {
'name': oid,
'type': t,
'value': value
}
self._data[oid] = {"name": oid, "type": t, "value": value}

View File

@@ -17,15 +17,17 @@ class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('agentx.network')
logger = logging.getLogger("agentx.network")
logger.addHandler(NullHandler())
class NetworkError(Exception):
pass
class Network():
def __init__(self, server_address = '/var/agentx/master'):
class Network:
def __init__(self, server_address="/var/agentx/master"):
self.session_id = 0
self.transaction_id = 0
@@ -37,19 +39,18 @@ class Network():
self._server_address = server_address
self._timeout = 0.1 # Seconds
def connect(self):
if self._connected:
return
try:
logger.info("Connecting to %s" % self._server_address)
if self._server_address.startswith('/'):
if self._server_address.startswith("/"):
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(self._server_address)
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host, port=self._server_address.split(':')
host, port = self._server_address.split(":")
self.socket.connect((host, int(port)))
self.socket.settimeout(self._timeout)
@@ -76,7 +77,9 @@ class Network():
self.data = newdata.copy()
del self.data_idx
self.data_idx = sorted(self.data.keys(), key=lambda k: tuple(int(part) for part in k.split('.')))
self.data_idx = sorted(
self.data.keys(), key=lambda k: tuple(int(part) for part in k.split("."))
)
def new_pdu(self, type):
pdu = PDU(type)
@@ -93,15 +96,18 @@ class Network():
return pdu
def send_pdu(self, pdu):
if self.debug: pdu.dump()
if self.debug:
pdu.dump()
self.socket.send(pdu.encode())
def recv_pdu(self):
buf = self.socket.recv(100000)
if not buf: return None
if not buf:
return None
pdu = PDU()
pdu.decode(buf)
if self.debug: pdu.dump()
if self.debug:
pdu.dump()
return pdu
# =========================================
@@ -118,10 +124,10 @@ class Network():
else:
# No exact match, find prefix
# logger.debug('get_next_oid, no exact match of %s' % oid)
slist = oid.split('.')
elist = endoid.split('.')
slist = oid.split(".")
elist = endoid.split(".")
for tmp_oid in self.data_idx:
tlist = tmp_oid.split('.')
tlist = tmp_oid.split(".")
for i in range(len(tlist)):
try:
sok = int(slist[i]) <= int(tlist[i])
@@ -168,7 +174,7 @@ class Network():
def run(self, timeout=0.1):
if not self._connected:
raise NetworkError('Not connected')
raise NetworkError("Not connected")
if timeout != self._timeout:
self.socket.settimeout(timeout)
@@ -195,11 +201,13 @@ class Network():
response.values.append(self.data[oid])
else:
logger.debug("OID Not Found!")
response.values.append({
'type': agentx.TYPE_NOSUCHOBJECT,
'name': rvalue[0],
'value': 0
})
response.values.append(
{
"type": agentx.TYPE_NOSUCHOBJECT,
"name": rvalue[0],
"value": 0,
}
)
elif request.type == agentx.AGENTX_GETNEXT_PDU:
logger.debug("Received GET_NEXT PDU")
@@ -209,11 +217,13 @@ class Network():
if oid:
response.values.append(self.data[oid])
else:
response.values.append({
'type': agentx.TYPE_ENDOFMIBVIEW,
'name': rvalue[0],
'value': 0
})
response.values.append(
{
"type": agentx.TYPE_ENDOFMIBVIEW,
"name": rvalue[0],
"value": 0,
}
)
else:
logger.warn("Received unsupported PDU %d" % request.type)

View File

@@ -12,10 +12,13 @@ import pprint
import logging
import agentx
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('agentx.pdu')
logger = logging.getLogger("agentx.pdu")
logger.addHandler(NullHandler())
@@ -27,34 +30,32 @@ class PDU(object):
self.packet_id = 0
self.error = agentx.ERROR_NOAGENTXERROR
self.error_index = 0
self.decode_buf = ''
self.decode_buf = ""
self.state = {}
self.values = []
def dump(self):
name = agentx.PDU_TYPE_NAME[self.type]
logger.debug('PDU DUMP: New PDU')
logger.debug("PDU DUMP: New PDU")
logger.debug(
'PDU DUMP: Meta : [%s: %d %d %d]' %
(name, self.session_id, self.transaction_id, self.packet_id))
if 'payload_length' in self.state:
logger.debug('PDU DUMP: Length : %s' %
self.state['payload_length'])
if hasattr(self, 'response'):
logger.debug('PDU DUMP: Response : %s' % self.response)
if hasattr(self, 'values'):
logger.debug('PDU DUMP: Values : %s' %
pprint.pformat(self.values))
if hasattr(self, 'range_list'):
logger.debug('PDU DUMP: Range list: %s' %
pprint.pformat(self.range_list))
"PDU DUMP: Meta : [%s: %d %d %d]"
% (name, self.session_id, self.transaction_id, self.packet_id)
)
if "payload_length" in self.state:
logger.debug("PDU DUMP: Length : %s" % self.state["payload_length"])
if hasattr(self, "response"):
logger.debug("PDU DUMP: Response : %s" % self.response)
if hasattr(self, "values"):
logger.debug("PDU DUMP: Values : %s" % pprint.pformat(self.values))
if hasattr(self, "range_list"):
logger.debug("PDU DUMP: Range list: %s" % pprint.pformat(self.range_list))
# ====================================================
# encode functions
def encode_oid(self, oid, include=0):
oid = oid.strip()
oid = oid.split('.')
oid = oid.split(".")
oid = [int(i) for i in oid]
if len(oid) > 5 and oid[:4] == [1, 3, 6, 1]:
# prefix
@@ -63,66 +64,70 @@ class PDU(object):
else:
# no prefix
prefix = 0
buf = struct.pack('BBBB', len(oid), prefix, include, 0)
buf = struct.pack("BBBB", len(oid), prefix, include, 0)
for i in range(len(oid)):
buf += struct.pack('!L', oid[i])
buf += struct.pack("!L", oid[i])
return buf
def encode_octet(self, octet):
octet = octet.encode("utf-8")
buf = struct.pack('!L', len(octet))
buf = struct.pack("!L", len(octet))
buf += octet
padding = (4 - (len(octet) % 4)) % 4
buf += chr(0).encode() * padding
return buf
def encode_value(self, type, name, value):
buf = struct.pack('!HH', type, 0)
buf = struct.pack("!HH", type, 0)
buf += self.encode_oid(name)
if type in [agentx.TYPE_INTEGER]:
buf += struct.pack('!l', value)
buf += struct.pack("!l", value)
elif type in [
agentx.TYPE_COUNTER32, agentx.TYPE_GAUGE32,
agentx.TYPE_TIMETICKS
agentx.TYPE_COUNTER32,
agentx.TYPE_GAUGE32,
agentx.TYPE_TIMETICKS,
]:
buf += struct.pack('!L', value)
buf += struct.pack("!L", value)
elif type in [agentx.TYPE_COUNTER64]:
buf += struct.pack('!Q', value)
buf += struct.pack("!Q", value)
elif type in [agentx.TYPE_OBJECTIDENTIFIER]:
buf += self.encode_oid(value)
elif type in [
agentx.TYPE_IPADDRESS, agentx.TYPE_OPAQUE,
agentx.TYPE_OCTETSTRING
agentx.TYPE_IPADDRESS,
agentx.TYPE_OPAQUE,
agentx.TYPE_OCTETSTRING,
]:
buf += self.encode_octet(value)
elif type in [
agentx.TYPE_NULL, agentx.TYPE_NOSUCHOBJECT,
agentx.TYPE_NOSUCHINSTANCE, agentx.TYPE_ENDOFMIBVIEW
agentx.TYPE_NULL,
agentx.TYPE_NOSUCHOBJECT,
agentx.TYPE_NOSUCHINSTANCE,
agentx.TYPE_ENDOFMIBVIEW,
]:
# No data
pass
else:
logger.error('Unknown Type: %s' % type)
logger.error("Unknown Type: %s" % type)
return buf
def encode_header(self, pdu_type, payload_length=0, flags=0):
flags = flags | 0x10 # Bit 5 = all ints in NETWORK_BYTE_ORDER
buf = struct.pack('BBBB', 1, pdu_type, flags, 0)
buf += struct.pack('!L', self.session_id) # sessionID
buf += struct.pack('!L', self.transaction_id) # transactionID
buf += struct.pack('!L', self.packet_id) # packetID
buf += struct.pack('!L', payload_length)
buf = struct.pack("BBBB", 1, pdu_type, flags, 0)
buf += struct.pack("!L", self.session_id) # sessionID
buf += struct.pack("!L", self.transaction_id) # transactionID
buf += struct.pack("!L", self.packet_id) # packetID
buf += struct.pack("!L", payload_length)
return buf
def encode(self):
buf = b''
buf = b""
if self.type == agentx.AGENTX_OPEN_PDU:
# timeout
buf += struct.pack('!BBBB', 5, 0, 0, 0)
buf += struct.pack("!BBBB", 5, 0, 0, 0)
# agent OID
buf += struct.pack('!L', 0)
buf += struct.pack("!L", 0)
# Agent Desc
buf += self.encode_octet('MyAgent')
buf += self.encode_octet("MyAgent")
elif self.type == agentx.AGENTX_PING_PDU:
# No extra data
@@ -132,15 +137,14 @@ class PDU(object):
range_subid = 0
timeout = 5
priority = 127
buf += struct.pack('BBBB', timeout, priority, range_subid, 0)
buf += struct.pack("BBBB", timeout, priority, range_subid, 0)
# Sub Tree
buf += self.encode_oid(self.oid)
elif self.type == agentx.AGENTX_RESPONSE_PDU:
buf += struct.pack('!LHH', 0, self.error, self.error_index)
buf += struct.pack("!LHH", 0, self.error, self.error_index)
for value in self.values:
buf += self.encode_value(value['type'], value['name'],
value['value'])
buf += self.encode_value(value["type"], value["name"], value["value"])
else:
# Unsupported PDU type
@@ -156,27 +160,27 @@ class PDU(object):
def decode_oid(self):
try:
t = struct.unpack('!BBBB', self.decode_buf[:4])
t = struct.unpack("!BBBB", self.decode_buf[:4])
self.decode_buf = self.decode_buf[4:]
ret = {
'n_subid': t[0],
'prefix': t[1],
'include': t[2],
'reserved': t[3],
"n_subid": t[0],
"prefix": t[1],
"include": t[2],
"reserved": t[3],
}
sub_ids = []
if ret['prefix']:
if ret["prefix"]:
sub_ids = [1, 3, 6, 1]
sub_ids.append(ret['prefix'])
for i in range(ret['n_subid']):
t = struct.unpack('!L', self.decode_buf[:4])
sub_ids.append(ret["prefix"])
for i in range(ret["n_subid"]):
t = struct.unpack("!L", self.decode_buf[:4])
self.decode_buf = self.decode_buf[4:]
sub_ids.append(t[0])
oid = '.'.join(str(i) for i in sub_ids)
return oid, ret['include']
oid = ".".join(str(i) for i in sub_ids)
return oid, ret["include"]
except Exception as e:
logger.exception('Invalid packing OID header')
logger.debug('%s' % pprint.pformat(self.decode_buf))
logger.exception("Invalid packing OID header")
logger.debug("%s" % pprint.pformat(self.decode_buf))
def decode_search_range(self):
start_oid, include = self.decode_oid()
@@ -193,7 +197,7 @@ class PDU(object):
def decode_octet(self):
try:
t = struct.unpack('!L', self.decode_buf[:4])
t = struct.unpack("!L", self.decode_buf[:4])
l = t[0]
self.decode_buf = self.decode_buf[4:]
padding = 4 - (l % 4)
@@ -201,107 +205,114 @@ class PDU(object):
self.decode_buf = self.decode_buf[l + padding :]
return buf
except Exception as e:
logger.exception('Invalid packing octet header')
logger.exception("Invalid packing octet header")
def decode_value(self):
try:
vtype, _ = struct.unpack('!HH', self.decode_buf[:4])
vtype, _ = struct.unpack("!HH", self.decode_buf[:4])
self.decode_buf = self.decode_buf[4:]
except Exception as e:
logger.exception('Invalid packing value header')
logger.exception("Invalid packing value header")
oid, _ = self.decode_oid()
if vtype in [
agentx.TYPE_INTEGER, agentx.TYPE_COUNTER32,
agentx.TYPE_GAUGE32, agentx.TYPE_TIMETICKS
agentx.TYPE_INTEGER,
agentx.TYPE_COUNTER32,
agentx.TYPE_GAUGE32,
agentx.TYPE_TIMETICKS,
]:
data = struct.unpack('!L', self.decode_buf[:4])
data = struct.unpack("!L", self.decode_buf[:4])
data = data[0]
self.decode_buf = self.decode_buf[4:]
elif vtype in [agentx.TYPE_COUNTER64]:
data = struct.unpack('!Q', self.decode_buf[:8])
data = struct.unpack("!Q", self.decode_buf[:8])
data = data[0]
self.decode_buf = self.decode_buf[8:]
elif vtype in [agentx.TYPE_OBJECTIDENTIFIER]:
data, _ = self.decode_oid()
elif vtype in [
agentx.TYPE_IPADDRESS, agentx.TYPE_OPAQUE,
agentx.TYPE_OCTETSTRING
agentx.TYPE_IPADDRESS,
agentx.TYPE_OPAQUE,
agentx.TYPE_OCTETSTRING,
]:
data = self.decode_octet()
elif vtype in [
agentx.TYPE_NULL, agentx.TYPE_NOSUCHOBJECT,
agentx.TYPE_NOSUCHINSTANCE, agentx.TYPE_ENDOFMIBVIEW
agentx.TYPE_NULL,
agentx.TYPE_NOSUCHOBJECT,
agentx.TYPE_NOSUCHINSTANCE,
agentx.TYPE_ENDOFMIBVIEW,
]:
# No data
data = None
else:
logger.error('Unknown Type: %s' % vtype)
return {'type': vtype, 'name': oid, 'data': data}
logger.error("Unknown Type: %s" % vtype)
return {"type": vtype, "name": oid, "data": data}
def decode_header(self):
try:
t = struct.unpack('!BBBBLLLL', self.decode_buf[:20])
t = struct.unpack("!BBBBLLLL", self.decode_buf[:20])
self.decode_buf = self.decode_buf[20:]
ret = {
'version': t[0],
'pdu_type': t[1],
'pdu_type_name': agentx.PDU_TYPE_NAME[t[1]],
'flags': t[2],
'reserved': t[3],
'session_id': t[4],
'transaction_id': t[5],
'packet_id': t[6],
'payload_length': t[7],
"version": t[0],
"pdu_type": t[1],
"pdu_type_name": agentx.PDU_TYPE_NAME[t[1]],
"flags": t[2],
"reserved": t[3],
"session_id": t[4],
"transaction_id": t[5],
"packet_id": t[6],
"payload_length": t[7],
}
self.state = ret
self.type = ret['pdu_type']
self.session_id = ret['session_id']
self.packet_id = ret['packet_id']
self.transaction_id = ret['transaction_id']
self.decode_buf = self.decode_buf[:ret['payload_length']]
if ret['flags'] & 0x08: # content present
self.type = ret["pdu_type"]
self.session_id = ret["session_id"]
self.packet_id = ret["packet_id"]
self.transaction_id = ret["transaction_id"]
self.decode_buf = self.decode_buf[: ret["payload_length"]]
if ret["flags"] & 0x08: # content present
context = self.decode_octet()
logger.debug('Context: %s' % context)
logger.debug("Context: %s" % context)
return ret
except Exception as e:
logger.exception('Invalid packing: %d' % len(self.decode_buf))
logger.debug('%s' % pprint.pformat(self.decode_buf))
logger.exception("Invalid packing: %d" % len(self.decode_buf))
logger.debug("%s" % pprint.pformat(self.decode_buf))
def decode(self, buf):
self.set_decode_buf(buf)
ret = self.decode_header()
if ret['pdu_type'] == agentx.AGENTX_RESPONSE_PDU:
if ret["pdu_type"] == agentx.AGENTX_RESPONSE_PDU:
# Decode Response Header
t = struct.unpack('!LHH', self.decode_buf[:8])
t = struct.unpack("!LHH", self.decode_buf[:8])
self.decode_buf = self.decode_buf[8:]
self.response = {
'sysUpTime': t[0],
'error': t[1],
'error_name': agentx.ERROR_NAMES[t[1]],
'index': t[2],
"sysUpTime": t[0],
"error": t[1],
"error_name": agentx.ERROR_NAMES[t[1]],
"index": t[2],
}
# Decode VarBindList
self.values = []
while len(self.decode_buf):
self.values.append(self.decode_value())
elif ret['pdu_type'] == agentx.AGENTX_GET_PDU:
elif ret["pdu_type"] == agentx.AGENTX_GET_PDU:
self.range_list = self.decode_search_range_list()
elif ret['pdu_type'] == agentx.AGENTX_GETNEXT_PDU:
elif ret["pdu_type"] == agentx.AGENTX_GETNEXT_PDU:
self.range_list = self.decode_search_range_list()
elif ret['pdu_type'] == agentx.AGENTX_TESTSET_PDU:
elif ret["pdu_type"] == agentx.AGENTX_TESTSET_PDU:
# Decode VarBindList
self.values = []
while len(self.decode_buf):
self.values.append(self.decode_value())
elif ret['pdu_type'] in [
agentx.AGENTX_COMMITSET_PDU, agentx.AGENTX_UNDOSET_PDU,
agentx.AGENTX_CLEANUPSET_PDU
elif ret["pdu_type"] in [
agentx.AGENTX_COMMITSET_PDU,
agentx.AGENTX_UNDOSET_PDU,
agentx.AGENTX_CLEANUPSET_PDU,
]:
pass
else:
pdu_type_str = agentx.PDU_TYPE_NAME.get(
ret['pdu_type'], 'Unknown:' + str(ret['pdu_type']))
logger.error('Unsupported PDU type:' + pdu_type_str)
ret["pdu_type"], "Unknown:" + str(ret["pdu_type"])
)
logger.error("Unsupported PDU type:" + pdu_type_str)

View File

@@ -13,6 +13,7 @@ except ImportError:
print("ERROR: install argparse manually: sudo pip install argparse")
sys.exit(2)
def get_phy_by_sw_if_index(ifaces, sw_if_index):
try:
for k, v in ifaces.items():
@@ -35,27 +36,27 @@ def get_lcp_by_host_sw_if_index(lcp, host_sw_if_index):
def get_description_by_ifname(config, name):
try:
if 'interfaces' in config:
for ifname, iface in config['interfaces'].items():
if "interfaces" in config:
for ifname, iface in config["interfaces"].items():
if ifname == name:
return iface['description']
if 'sub-interfaces' in iface:
for sub_id, sub_iface in iface['sub-interfaces'].items():
return iface["description"]
if "sub-interfaces" in iface:
for sub_id, sub_iface in iface["sub-interfaces"].items():
sub_ifname = "%s.%d" % (ifname, sub_id)
if name == sub_ifname:
return sub_iface['description']
if 'loopbacks' in config:
for ifname, iface in config['loopbacks'].items():
return sub_iface["description"]
if "loopbacks" in config:
for ifname, iface in config["loopbacks"].items():
if ifname == name:
return iface['description']
if 'taps' in config:
for ifname, iface in config['taps'].items():
return iface["description"]
if "taps" in config:
for ifname, iface in config["taps"].items():
if ifname == name:
return iface['description']
if 'vxlan_tunnels' in config:
for ifname, iface in config['vxlan_tunnels'].items():
return iface["description"]
if "vxlan_tunnels" in config:
for ifname, iface in config["vxlan_tunnels"].items():
if ifname == name:
return iface['description']
return iface["description"]
except:
pass
return None
@@ -77,25 +78,24 @@ class MyAgent(agentx.Agent):
try:
self.logger.info("Connecting to VPP Stats Segment")
vppstat = VPPStats(socketname='/run/vpp/stats.sock', timeout=2)
vppstat = VPPStats(socketname="/run/vpp/stats.sock", timeout=2)
vppstat.connect()
except:
self.logger.error("Could not connect to VPPStats segment")
return False
try:
vpp = VPPApi(clientname='vpp-snmp-agent')
vpp = VPPApi(clientname="vpp-snmp-agent")
vpp.connect()
except:
self.logger.error("Could not connect to VPP API")
return False
self.register('1.3.6.1.2.1.2.2.1')
self.register('1.3.6.1.2.1.31.1.1.1')
self.register("1.3.6.1.2.1.2.2.1")
self.register("1.3.6.1.2.1.31.1.1.1")
return True
def update(self):
global vppstat, vpp, args
@@ -116,50 +116,60 @@ class MyAgent(agentx.Agent):
lcp = vpp.get_lcp()
num_ifaces = len(ifaces)
num_vppstat=len(vppstat['/if/names'])
num_vppstat = len(vppstat["/if/names"])
num_lcp = len(lcp)
self.logger.debug("LCP: %s" % (lcp))
self.logger.debug("Retrieved Interfaces: vppapi=%d vppstats=%d lcp=%d" % (num_ifaces, num_vppstat, num_lcp))
self.logger.debug(
"Retrieved Interfaces: vppapi=%d vppstats=%d lcp=%d"
% (num_ifaces, num_vppstat, num_lcp)
)
if num_ifaces != num_vppstat:
self.logger.warning("Interfaces count mismatch: vppapi=%d vppstats=%d" % (num_ifaces, num_vppstat))
self.logger.warning(
"Interfaces count mismatch: vppapi=%d vppstats=%d"
% (num_ifaces, num_vppstat)
)
for i in range(len(vppstat['/if/names'])):
ifname = vppstat['/if/names'][i]
for i in range(len(vppstat["/if/names"])):
ifname = vppstat["/if/names"][i]
idx = 1000 + i
ds.set('1.3.6.1.2.1.2.2.1.1.%u' % (idx), 'int', idx)
ds.set("1.3.6.1.2.1.2.2.1.1.%u" % (idx), "int", idx)
ifName = ifname
ifAlias = None
try:
if self.config and ifname.startswith('tap'):
if self.config and ifname.startswith("tap"):
host_sw_if_index = ifaces[ifname].sw_if_index
lip = get_lcp_by_host_sw_if_index(lcp, host_sw_if_index)
if lip:
phy = get_phy_by_sw_if_index(ifaces, lip.phy_sw_if_index)
ifName = lip.host_if_name
self.logger.debug("Setting ifName of %s to '%s'" % (ifname, ifName))
self.logger.debug(
"Setting ifName of %s to '%s'" % (ifname, ifName)
)
if phy:
ifAlias = "LCP %s (%s)" % (phy.interface_name, ifname)
self.logger.debug("Setting ifAlias of %s to '%s'" % (ifname, ifAlias))
self.logger.debug(
"Setting ifAlias of %s to '%s'" % (ifname, ifAlias)
)
except:
self.logger.debug("No config entry found for ifname %s" % (ifname))
pass
ds.set('1.3.6.1.2.1.2.2.1.2.%u' % (idx), 'str', ifName)
ds.set("1.3.6.1.2.1.2.2.1.2.%u" % (idx), "str", ifName)
if ifname.startswith("loop"):
ds.set('1.3.6.1.2.1.2.2.1.3.%u' % (idx), 'int', 24) # softwareLoopback
ds.set("1.3.6.1.2.1.2.2.1.3.%u" % (idx), "int", 24) # softwareLoopback
else:
ds.set('1.3.6.1.2.1.2.2.1.3.%u' % (idx), 'int', 6) # ethernet-csmacd
ds.set("1.3.6.1.2.1.2.2.1.3.%u" % (idx), "int", 6) # ethernet-csmacd
mtu = 0
if not ifname in ifaces:
self.logger.warning("Could not get MTU for interface %s", ifname)
else:
mtu = ifaces[ifname].mtu[0]
ds.set('1.3.6.1.2.1.2.2.1.4.%u' % (idx), 'int', mtu)
ds.set("1.3.6.1.2.1.2.2.1.4.%u" % (idx), "int", mtu)
speed = 0
if ifname.startswith("loop") or ifname.startswith("tap"):
@@ -170,24 +180,28 @@ class MyAgent(agentx.Agent):
speed = ifaces[ifname].link_speed * 1000
if speed >= 2 ** 32:
speed = 2 ** 32 - 1
ds.set('1.3.6.1.2.1.2.2.1.5.%u' % (idx), 'gauge32', speed)
ds.set("1.3.6.1.2.1.2.2.1.5.%u" % (idx), "gauge32", speed)
mac = "00:00:00:00:00:00"
if not ifname in ifaces:
self.logger.warning("Could not get PhysAddress for interface %s", ifname)
self.logger.warning(
"Could not get PhysAddress for interface %s", ifname
)
else:
mac = str(ifaces[ifname].l2_address)
ds.set('1.3.6.1.2.1.2.2.1.6.%u' % (idx), 'str', mac)
ds.set("1.3.6.1.2.1.2.2.1.6.%u" % (idx), "str", mac)
admin_status = 3 # testing
if not ifname in ifaces:
self.logger.warning("Could not get AdminStatus for interface %s", ifname)
self.logger.warning(
"Could not get AdminStatus for interface %s", ifname
)
else:
if int(ifaces[ifname].flags) & 1:
admin_status = 1 # up
else:
admin_status = 2 # down
ds.set('1.3.6.1.2.1.2.2.1.7.%u' % (idx), 'int', admin_status)
ds.set("1.3.6.1.2.1.2.2.1.7.%u" % (idx), "int", admin_status)
oper_status = 3 # testing
if not ifname in ifaces:
@@ -197,36 +211,124 @@ class MyAgent(agentx.Agent):
oper_status = 1 # up
else:
oper_status = 2 # down
ds.set('1.3.6.1.2.1.2.2.1.8.%u' % (idx), 'int', oper_status)
ds.set("1.3.6.1.2.1.2.2.1.8.%u" % (idx), "int", oper_status)
ds.set('1.3.6.1.2.1.2.2.1.9.%u' % (idx), 'ticks', 0)
ds.set('1.3.6.1.2.1.2.2.1.10.%u' % (idx), 'u32', vppstat['/if/rx'][:, i].sum_octets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.11.%u' % (idx), 'u32', vppstat['/if/rx'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.12.%u' % (idx), 'u32', vppstat['/if/rx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.13.%u' % (idx), 'u32', vppstat['/if/rx-no-buf'][:, i].sum() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.14.%u' % (idx), 'u32', vppstat['/if/rx-error'][:, i].sum() % 2**32)
ds.set("1.3.6.1.2.1.2.2.1.9.%u" % (idx), "ticks", 0)
ds.set(
"1.3.6.1.2.1.2.2.1.10.%u" % (idx),
"u32",
vppstat["/if/rx"][:, i].sum_octets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.11.%u" % (idx),
"u32",
vppstat["/if/rx"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.12.%u" % (idx),
"u32",
vppstat["/if/rx-multicast"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.13.%u" % (idx),
"u32",
vppstat["/if/rx-no-buf"][:, i].sum() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.14.%u" % (idx),
"u32",
vppstat["/if/rx-error"][:, i].sum() % 2 ** 32,
)
ds.set('1.3.6.1.2.1.2.2.1.16.%u' % (idx), 'u32', vppstat['/if/tx'][:, i].sum_octets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.17.%u' % (idx), 'u32', vppstat['/if/tx'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.18.%u' % (idx), 'u32', vppstat['/if/tx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.19.%u' % (idx), 'u32', vppstat['/if/drops'][:, i].sum() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.20.%u' % (idx), 'u32', vppstat['/if/tx-error'][:, i].sum() % 2**32)
ds.set(
"1.3.6.1.2.1.2.2.1.16.%u" % (idx),
"u32",
vppstat["/if/tx"][:, i].sum_octets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.17.%u" % (idx),
"u32",
vppstat["/if/tx"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.18.%u" % (idx),
"u32",
vppstat["/if/tx-multicast"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.19.%u" % (idx),
"u32",
vppstat["/if/drops"][:, i].sum() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.2.2.1.20.%u" % (idx),
"u32",
vppstat["/if/tx-error"][:, i].sum() % 2 ** 32,
)
ds.set('1.3.6.1.2.1.31.1.1.1.1.%u' % (idx), 'str', ifName)
ds.set('1.3.6.1.2.1.31.1.1.1.2.%u' % (idx), 'u32', vppstat['/if/rx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.3.%u' % (idx), 'u32', vppstat['/if/rx-broadcast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.4.%u' % (idx), 'u32', vppstat['/if/tx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.5.%u' % (idx), 'u32', vppstat['/if/tx-broadcast'][:, i].sum_packets() % 2**32)
ds.set("1.3.6.1.2.1.31.1.1.1.1.%u" % (idx), "str", ifName)
ds.set(
"1.3.6.1.2.1.31.1.1.1.2.%u" % (idx),
"u32",
vppstat["/if/rx-multicast"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.3.%u" % (idx),
"u32",
vppstat["/if/rx-broadcast"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.4.%u" % (idx),
"u32",
vppstat["/if/tx-multicast"][:, i].sum_packets() % 2 ** 32,
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.5.%u" % (idx),
"u32",
vppstat["/if/tx-broadcast"][:, i].sum_packets() % 2 ** 32,
)
ds.set('1.3.6.1.2.1.31.1.1.1.6.%u' % (idx), 'u64', vppstat['/if/rx'][:, i].sum_octets())
ds.set('1.3.6.1.2.1.31.1.1.1.7.%u' % (idx), 'u64', vppstat['/if/rx'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.8.%u' % (idx), 'u64', vppstat['/if/rx-multicast'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.9.%u' % (idx), 'u64', vppstat['/if/rx-broadcast'][:, i].sum_packets())
ds.set(
"1.3.6.1.2.1.31.1.1.1.6.%u" % (idx),
"u64",
vppstat["/if/rx"][:, i].sum_octets(),
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.7.%u" % (idx),
"u64",
vppstat["/if/rx"][:, i].sum_packets(),
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.8.%u" % (idx),
"u64",
vppstat["/if/rx-multicast"][:, i].sum_packets(),
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.9.%u" % (idx),
"u64",
vppstat["/if/rx-broadcast"][:, i].sum_packets(),
)
ds.set('1.3.6.1.2.1.31.1.1.1.10.%u' % (idx), 'u64', vppstat['/if/tx'][:, i].sum_octets())
ds.set('1.3.6.1.2.1.31.1.1.1.11.%u' % (idx), 'u64', vppstat['/if/tx'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.12.%u' % (idx), 'u64', vppstat['/if/tx-multicast'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.13.%u' % (idx), 'u64', vppstat['/if/tx-broadcast'][:, i].sum_packets())
ds.set(
"1.3.6.1.2.1.31.1.1.1.10.%u" % (idx),
"u64",
vppstat["/if/tx"][:, i].sum_octets(),
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.11.%u" % (idx),
"u64",
vppstat["/if/tx"][:, i].sum_packets(),
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.12.%u" % (idx),
"u64",
vppstat["/if/tx-multicast"][:, i].sum_packets(),
)
ds.set(
"1.3.6.1.2.1.31.1.1.1.13.%u" % (idx),
"u64",
vppstat["/if/tx-broadcast"][:, i].sum_packets(),
)
speed = 0
if ifname.startswith("loop") or ifname.startswith("tap"):
@@ -235,34 +337,65 @@ class MyAgent(agentx.Agent):
self.logger.warning("Could not get link speed for interface %s", ifname)
else:
speed = int(ifaces[ifname].link_speed / 1000)
ds.set('1.3.6.1.2.1.31.1.1.1.15.%u' % (idx), 'gauge32', speed)
ds.set("1.3.6.1.2.1.31.1.1.1.15.%u" % (idx), "gauge32", speed)
ds.set('1.3.6.1.2.1.31.1.1.1.16.%u' % (idx), 'int', 2) # Hardcode to false(2)
ds.set('1.3.6.1.2.1.31.1.1.1.17.%u' % (idx), 'int', 1) # Hardcode to true(1)
ds.set(
"1.3.6.1.2.1.31.1.1.1.16.%u" % (idx), "int", 2
) # Hardcode to false(2)
ds.set(
"1.3.6.1.2.1.31.1.1.1.17.%u" % (idx), "int", 1
) # Hardcode to true(1)
if self.config and not ifAlias:
try:
descr = get_description_by_ifname(self.config, ifname)
if descr:
self.logger.debug("Setting ifAlias of %s to config description '%s'" % (ifname, descr))
self.logger.debug(
"Setting ifAlias of %s to config description '%s'"
% (ifname, descr)
)
ifAlias = descr
except:
pass
if not ifAlias:
self.logger.debug("Setting ifAlias of %s to ifname %s" % (ifname, ifname))
self.logger.debug(
"Setting ifAlias of %s to ifname %s" % (ifname, ifname)
)
ifAlias = ifname
ds.set('1.3.6.1.2.1.31.1.1.1.18.%u' % (idx), 'str', ifAlias)
ds.set('1.3.6.1.2.1.31.1.1.1.19.%u' % (idx), 'ticks', 0) # Hardcode to Timeticks: (0) 0:00:00.00
ds.set("1.3.6.1.2.1.31.1.1.1.18.%u" % (idx), "str", ifAlias)
ds.set(
"1.3.6.1.2.1.31.1.1.1.19.%u" % (idx), "ticks", 0
) # Hardcode to Timeticks: (0) 0:00:00.00
return ds
def main():
global args
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', dest='address', default="localhost:705", type=str, help="""Location of the SNMPd agent (unix-path or host:port), default localhost:705""")
parser.add_argument('-p', dest='period', type=int, default=30, help="""Period to poll VPP, default 30 (seconds)""")
parser.add_argument('-c', dest='config', type=str, help="""Optional vppcfg YAML configuration file, default empty""")
parser.add_argument('-d', dest='debug', action='store_true', help="""Enable debug, default False""")
parser.add_argument(
"-a",
dest="address",
default="localhost:705",
type=str,
help="""Location of the SNMPd agent (unix-path or host:port), default localhost:705""",
)
parser.add_argument(
"-p",
dest="period",
type=int,
default=30,
help="""Period to poll VPP, default 30 (seconds)""",
)
parser.add_argument(
"-c",
dest="config",
type=str,
help="""Optional vppcfg YAML configuration file, default empty""",
)
parser.add_argument(
"-d", dest="debug", action="store_true", help="""Enable debug, default False"""
)
args = parser.parse_args()
if args.debug:
@@ -281,5 +414,6 @@ def main():
sys.exit(-1)
if __name__ == "__main__":
main()

View File

@@ -1,7 +1,7 @@
'''
"""
The functions in this file interact with the VPP API to retrieve certain
interface metadata.
'''
"""
from vpp_papi import VPPApiClient
import os
@@ -13,12 +13,14 @@ import socket
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('agentx.vppapi')
logger = logging.getLogger("agentx.vppapi")
logger.addHandler(NullHandler())
class VPPApi():
def __init__(self, address='/run/vpp/api.sock', clientname='vppapi-client'):
class VPPApi:
def __init__(self, address="/run/vpp/api.sock", clientname="vppapi-client"):
self.address = address
self.connected = False
self.clientname = clientname
@@ -28,28 +30,27 @@ class VPPApi():
if self.connected:
return True
vpp_json_dir = '/usr/share/vpp/api/'
vpp_json_dir = "/usr/share/vpp/api/"
# construct a list of all the json api files
jsonfiles = []
for root, dirnames, filenames in os.walk(vpp_json_dir):
for filename in fnmatch.filter(filenames, '*.api.json'):
for filename in fnmatch.filter(filenames, "*.api.json"):
jsonfiles.append(os.path.join(root, filename))
if not jsonfiles:
logger.error('no json api files found')
logger.error("no json api files found")
return False
self.vpp = VPPApiClient(apifiles=jsonfiles,
server_address=self.address)
self.vpp = VPPApiClient(apifiles=jsonfiles, server_address=self.address)
try:
logger.info('Connecting to VPP')
logger.info("Connecting to VPP")
self.vpp.connect(self.clientname)
except:
return False
v = self.vpp.api.show_version()
logger.info('VPP version is %s' % v.version)
logger.info("VPP version is %s" % v.version)
self.connected = True
return True

View File

@@ -1,4 +1,4 @@
'''
"""
VPPStats is a shared memory implementation that exposes the VPP statseg as
an associative array. Counters can be accessed in either dimension.
stat['/if/rx'] - returns 2D lists
@@ -30,7 +30,7 @@ Usage:
stat.disconnect()
'''
"""
import os
import socket
@@ -40,8 +40,9 @@ from struct import Struct
import time
import re
def recv_fd(sock):
'''Get file descriptor for memory map'''
"""Get file descriptor for memory map"""
fds = array.array("i") # Array of ints
_, ancdata, _, _ = sock.recvmsg(0, socket.CMSG_LEN(4))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
@@ -49,22 +50,26 @@ def recv_fd(sock):
fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return list(fds)[0]
VEC_LEN_FMT = Struct('I')
VEC_LEN_FMT = Struct("I")
def get_vec_len(stats, vector_offset):
'''Equivalent to VPP vec_len()'''
"""Equivalent to VPP vec_len()"""
return VEC_LEN_FMT.unpack_from(stats.statseg, vector_offset - 8)[0]
def get_string(stats, ptr):
'''Get a string from a VPP vector'''
"""Get a string from a VPP vector"""
namevector = ptr - stats.base
namevectorlen = get_vec_len(stats, namevector)
if namevector + namevectorlen >= stats.size:
raise IOError('String overruns stats segment')
return stats.statseg[namevector:namevector+namevectorlen-1].decode('ascii')
raise IOError("String overruns stats segment")
return stats.statseg[namevector : namevector + namevectorlen - 1].decode("ascii")
class StatsVector:
'''A class representing a VPP vector'''
"""A class representing a VPP vector"""
def __init__(self, stats, ptr, fmt):
self.vec_start = ptr - stats.base
@@ -76,28 +81,35 @@ class StatsVector:
self.stats = stats
if self.vec_start + self.vec_len * self.elementsize >= stats.size:
raise IOError('Vector overruns stats segment')
raise IOError("Vector overruns stats segment")
def __iter__(self):
with self.stats.lock:
return self.struct.iter_unpack(self.statseg[self.vec_start:self.vec_start +
self.elementsize*self.vec_len])
return self.struct.iter_unpack(
self.statseg[
self.vec_start : self.vec_start + self.elementsize * self.vec_len
]
)
def __getitem__(self, index):
if index > self.vec_len:
raise IOError('Index beyond end of vector')
raise IOError("Index beyond end of vector")
with self.stats.lock:
if self.fmtlen == 1:
return self.struct.unpack_from(self.statseg, self.vec_start +
(index * self.elementsize))[0]
return self.struct.unpack_from(self.statseg, self.vec_start +
(index * self.elementsize))
return self.struct.unpack_from(
self.statseg, self.vec_start + (index * self.elementsize)
)[0]
return self.struct.unpack_from(
self.statseg, self.vec_start + (index * self.elementsize)
)
class VPPStats:
"""Main class implementing Python access to the VPP statistics segment"""
class VPPStats():
'''Main class implementing Python access to the VPP statistics segment'''
# pylint: disable=too-many-instance-attributes
shared_headerfmt = Struct('QPQQPP')
default_socketname = '/run/vpp/stats.sock'
shared_headerfmt = Struct("QPQQPP")
default_socketname = "/run/vpp/stats.sock"
def __init__(self, socketname=default_socketname, timeout=10):
self.socketname = socketname
@@ -110,7 +122,7 @@ class VPPStats():
self.statseg = 0
def connect(self):
'''Connect to stats segment'''
"""Connect to stats segment"""
if self.connected:
return
sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
@@ -120,61 +132,64 @@ class VPPStats():
sock.close()
stat_result = os.fstat(mfd)
self.statseg = mmap.mmap(mfd, stat_result.st_size, mmap.PROT_READ, mmap.MAP_SHARED)
self.statseg = mmap.mmap(
mfd, stat_result.st_size, mmap.PROT_READ, mmap.MAP_SHARED
)
os.close(mfd)
self.size = stat_result.st_size
if self.version != 2:
raise Exception('Incompatible stat segment version {}'
.format(self.version))
raise Exception("Incompatible stat segment version {}".format(self.version))
self.refresh()
self.connected = True
def disconnect(self):
'''Disconnect from stats segment'''
"""Disconnect from stats segment"""
if self.connected:
self.statseg.close()
self.connected = False
@property
def version(self):
'''Get version of stats segment'''
"""Get version of stats segment"""
return self.shared_headerfmt.unpack_from(self.statseg)[0]
@property
def base(self):
'''Get base pointer of stats segment'''
"""Get base pointer of stats segment"""
return self.shared_headerfmt.unpack_from(self.statseg)[1]
@property
def epoch(self):
'''Get current epoch value from stats segment'''
"""Get current epoch value from stats segment"""
return self.shared_headerfmt.unpack_from(self.statseg)[2]
@property
def in_progress(self):
'''Get value of in_progress from stats segment'''
"""Get value of in_progress from stats segment"""
return self.shared_headerfmt.unpack_from(self.statseg)[3]
@property
def directory_vector(self):
'''Get pointer of directory vector'''
"""Get pointer of directory vector"""
return self.shared_headerfmt.unpack_from(self.statseg)[4]
elementfmt = 'IQ128s'
elementfmt = "IQ128s"
def refresh(self, blocking=True):
'''Refresh directory vector cache (epoch changed)'''
"""Refresh directory vector cache (epoch changed)"""
directory = {}
directory_by_idx = {}
while True:
try:
with self.lock:
self.last_epoch = self.epoch
for i, direntry in enumerate(StatsVector(self, self.directory_vector, self.elementfmt)):
path_raw = direntry[2].find(b'\x00')
path = direntry[2][:path_raw].decode('ascii')
for i, direntry in enumerate(
StatsVector(self, self.directory_vector, self.elementfmt)
):
path_raw = direntry[2].find(b"\x00")
path = direntry[2][:path_raw].decode("ascii")
directory[path] = StatsEntry(direntry[0], direntry[1])
directory_by_idx[i] = path
self.directory = directory
@@ -200,14 +215,12 @@ class VPPStats():
def __iter__(self):
return iter(self.directory.items())
def set_errors(self, blocking=True):
'''Return dictionary of error counters > 0'''
"""Return dictionary of error counters > 0"""
if not self.connected:
self.connect()
errors = {k: v for k, v in self.directory.items()
if k.startswith("/err/")}
errors = {k: v for k, v in self.directory.items() if k.startswith("/err/")}
result = {}
for k in errors:
try:
@@ -219,23 +232,23 @@ class VPPStats():
return result
def set_errors_str(self, blocking=True):
'''Return all errors counters > 0 pretty printed'''
error_string = ['ERRORS:']
"""Return all errors counters > 0 pretty printed"""
error_string = ["ERRORS:"]
error_counters = self.set_errors(blocking)
for k in sorted(error_counters):
error_string.append('{:<60}{:>10}'.format(k, error_counters[k]))
return '%s\n' % '\n'.join(error_string)
error_string.append("{:<60}{:>10}".format(k, error_counters[k]))
return "%s\n" % "\n".join(error_string)
def get_counter(self, name, blocking=True):
'''Alternative call to __getitem__'''
"""Alternative call to __getitem__"""
return self.__getitem__(name, blocking)
def get_err_counter(self, name, blocking=True):
'''Alternative call to __getitem__'''
"""Alternative call to __getitem__"""
return self.__getitem__(name, blocking).sum()
def ls(self, patterns):
'''Returns list of counters matching pattern'''
"""Returns list of counters matching pattern"""
# pylint: disable=invalid-name
if not self.connected:
self.connect()
@@ -245,11 +258,14 @@ class VPPStats():
if self.last_epoch != self.epoch:
self.refresh()
return [k for k, v in self.directory.items()
if any(re.match(pattern, k) for pattern in regex)]
return [
k
for k, v in self.directory.items()
if any(re.match(pattern, k) for pattern in regex)
]
def dump(self, counters, blocking=True):
'''Given a list of counters return a dictionary of results'''
"""Given a list of counters return a dictionary of results"""
if not self.connected:
self.connect()
result = {}
@@ -257,8 +273,9 @@ class VPPStats():
result[cnt] = self.__getitem__(cnt, blocking)
return result
class StatsLock():
'''Stat segment optimistic locking'''
class StatsLock:
"""Stat segment optimistic locking"""
def __init__(self, stats):
self.stats = stats
@@ -273,7 +290,7 @@ class StatsLock():
self.release()
def acquire(self, blocking=True, timeout=-1):
'''Acquire the lock. Await in progress to go false. Record epoch.'''
"""Acquire the lock. Await in progress to go false. Record epoch."""
self.epoch = self.stats.epoch
if timeout > 0:
start = time.monotonic()
@@ -286,46 +303,49 @@ class StatsLock():
return True
def release(self):
'''Check if data read while locked is valid'''
"""Check if data read while locked is valid"""
if self.stats.in_progress or self.stats.epoch != self.epoch:
raise IOError('Optimistic lock failed, retry')
raise IOError("Optimistic lock failed, retry")
def locked(self):
'''Not used'''
"""Not used"""
class StatsCombinedList(list):
'''Column slicing for Combined counters list'''
"""Column slicing for Combined counters list"""
def __getitem__(self, item):
'''Supports partial numpy style 2d support. Slice by column [:,1]'''
"""Supports partial numpy style 2d support. Slice by column [:,1]"""
if isinstance(item, int):
return list.__getitem__(self, item)
return CombinedList([row[item[1]] for row in self])
class CombinedList(list):
'''Combined Counters 2-dimensional by thread by index of packets/octets'''
"""Combined Counters 2-dimensional by thread by index of packets/octets"""
def packets(self):
'''Return column (2nd dimension). Packets for all threads'''
"""Return column (2nd dimension). Packets for all threads"""
return [pair[0] for pair in self]
def octets(self):
'''Return column (2nd dimension). Octets for all threads'''
"""Return column (2nd dimension). Octets for all threads"""
return [pair[1] for pair in self]
def sum_packets(self):
'''Return column (2nd dimension). Sum of all packets for all threads'''
"""Return column (2nd dimension). Sum of all packets for all threads"""
return sum(self.packets())
def sum_octets(self):
'''Return column (2nd dimension). Sum of all octets for all threads'''
"""Return column (2nd dimension). Sum of all octets for all threads"""
return sum(self.octets())
class StatsTuple(tuple):
'''A Combined vector tuple (packets, octets)'''
"""A Combined vector tuple (packets, octets)"""
def __init__(self, data):
self.dictionary = {'packets': data[0], 'bytes': data[1]}
self.dictionary = {"packets": data[0], "bytes": data[1]}
super().__init__()
def __repr__(self):
@@ -334,28 +354,32 @@ class StatsTuple(tuple):
def __getitem__(self, item):
if isinstance(item, int):
return tuple.__getitem__(self, item)
if item == 'packets':
if item == "packets":
return tuple.__getitem__(self, 0)
return tuple.__getitem__(self, 1)
class StatsSimpleList(list):
'''Simple Counters 2-dimensional by thread by index of packets'''
"""Simple Counters 2-dimensional by thread by index of packets"""
def __getitem__(self, item):
'''Supports partial numpy style 2d support. Slice by column [:,1]'''
"""Supports partial numpy style 2d support. Slice by column [:,1]"""
if isinstance(item, int):
return list.__getitem__(self, item)
return SimpleList([row[item[1]] for row in self])
class SimpleList(list):
'''Simple counter'''
"""Simple counter"""
def sum(self):
'''Sum the vector'''
"""Sum the vector"""
return sum(self)
class StatsEntry():
'''An individual stats entry'''
class StatsEntry:
"""An individual stats entry"""
# pylint: disable=unused-argument,no-self-use
def __init__(self, stattype, statvalue):
@@ -376,47 +400,48 @@ class StatsEntry():
self.function = self.illegal
def illegal(self, stats):
'''Invalid or unknown counter type'''
"""Invalid or unknown counter type"""
return None
def scalar(self, stats):
'''Scalar counter'''
"""Scalar counter"""
return self.value
def simple(self, stats):
'''Simple counter'''
"""Simple counter"""
counter = StatsSimpleList()
for threads in StatsVector(stats, self.value, 'P'):
clist = [v[0] for v in StatsVector(stats, threads[0], 'Q')]
for threads in StatsVector(stats, self.value, "P"):
clist = [v[0] for v in StatsVector(stats, threads[0], "Q")]
counter.append(clist)
return counter
def combined(self, stats):
'''Combined counter'''
"""Combined counter"""
counter = StatsCombinedList()
for threads in StatsVector(stats, self.value, 'P'):
clist = [StatsTuple(cnt) for cnt in StatsVector(stats, threads[0], 'QQ')]
for threads in StatsVector(stats, self.value, "P"):
clist = [StatsTuple(cnt) for cnt in StatsVector(stats, threads[0], "QQ")]
counter.append(clist)
return counter
def name(self, stats):
'''Name counter'''
"""Name counter"""
counter = []
for name in StatsVector(stats, self.value, 'P'):
for name in StatsVector(stats, self.value, "P"):
if name[0]:
counter.append(get_string(stats, name[0]))
return counter
SYMLINK_FMT1 = Struct('II')
SYMLINK_FMT2 = Struct('Q')
SYMLINK_FMT1 = Struct("II")
SYMLINK_FMT2 = Struct("Q")
def symlink(self, stats):
'''Symlink counter'''
"""Symlink counter"""
b = self.SYMLINK_FMT2.pack(self.value)
index1, index2 = self.SYMLINK_FMT1.unpack(b)
name = stats.directory_by_idx[index1]
return stats[name][:, index2]
def get_counter(self, stats):
'''Return a list of counters'''
"""Return a list of counters"""
if stats:
return self.function(stats)
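To close, a small usage sketch for the VPPStats class reformatted above, following its module docstring and the calls made in MyAgent.update(); the module path in the import is an assumption about this repository's layout:

from vppstats import VPPStats  # assumed module name

stats = VPPStats(socketname="/run/vpp/stats.sock", timeout=2)
stats.connect()
# '/if/rx' is a combined counter: 2D by thread and interface index,
# so slice out interface 0 and sum across threads.
rx = stats["/if/rx"][:, 0]
print(rx.sum_packets(), rx.sum_octets())
stats.disconnect()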