Start from clean GIT

Tim Schubert 2018-09-21 11:45:21 +02:00
commit e841f14a95
2112 changed files with 6638085 additions and 0 deletions

283 tools/consumption.py Executable file

@@ -0,0 +1,283 @@
#!/usr/bin/env python
import argparse
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from rpl import analysis, data
tubspalette = ['#be1e3c', '#ffc82a', '#e16d00', '#711c2f', '#acc13a', '#6d8300', '#00534a', '#66b4d3', '#007a9b', '#003f57', '#8a307f', '#511246', '#4c1830']
phase_names = ['N', 'R', 'H', 'HR', 'HS', 'HSR']
versions = ['Contiki', 'Hardened', 'Hardened with UIDs']
node_positions = {
'm3-59': (0,0),
'm3-57': (0,1),
'm3-53': (0,3),
'm3-51': (0,4),
'm3-49': (0,5),
'm3-47': (0,6),
'm3-95': (1,0),
'm3-93': (1,1),
'm3-91': (1,2),
'm3-89': (1,3),
'm3-87': (1,4),
'm3-85': (1,5),
'm3-83': (1,6),
'm3-133': (2,1),
'm3-131': (2,2),
'm3-127': (2,4),
'm3-123': (2,6),
'm3-161': (3,1),
'm3-159': (3,2),
'm3-157': (3,3),
'm3-155': (3,4),
'm3-153': (3,5),
'm3-151': (3,6),
'm3-204': (4,0),
'm3-202': (4,1),
'm3-200': (4,2),
'm3-198': (4,3),
'm3-196': (4,4),
'm3-194': (4,5),
'm3-192': (4,6),
'm3-230': (5,0),
'm3-228': (5,1),
'm3-226': (5,2),
'm3-224': (5,3),
'm3-222': (5,4),
'm3-220': (5,5),
'm3-218': (5,6),
'm3-256': (6,0),
'm3-254': (6,1),
'm3-252': (6,2),
'm3-250': (6,3),
'm3-248': (6,4),
'm3-246': (6,5),
'm3-244': (6,6)
}
def consumption_network_phases(db, phaselen, out):
consumptions = db.execute('''SELECT phase, (phase - 1)/ 2, SUM(consumption) / ? FROM consumption GROUP BY phase, expid''', (phaselen,))
data = np.array([(phase_names[phase-1], versions[int(reset)], cons) for phase, reset, cons in consumptions],
dtype=[('Phase', 'U3'), ('Version', 'U30'), ('Consumption', 'f')])
sns.boxplot(x='Phase', y='Consumption', hue='Version', data=pd.DataFrame(data))
def consumption_node_phases(db, phaselen, out):
consumptions = db.execute('''
SELECT host, phase, consumption / ?
FROM consumption
''', (phaselen,))
data = np.array(
[(host, phase_names[phase-1], cons) for host, phase, cons in consumptions],
dtype=[('node', 'U6'), ('phase', 'U3'), ('consumption', np.float16)])
sns.boxplot(x='phase', y='consumption', hue='node', data=pd.DataFrame(data))
def restart_energy_consumption(db, out):
consumptions = db.execute('''
SELECT resets.phase, resets.timestamp, SUM(consumption)
FROM resets
JOIN consumption
ON resets.phase = consumption.phase
AND resets.expid = consumption.expid
GROUP BY consumption.phase, consumption.expid
''')
data = np.array(
[(versions[int((phase / 2) - 1)], reset, cons) for phase, reset, cons in consumptions.fetchall()], dtype=[('Phase', 'U20'), ('Reset Time', 'f'), ('Consumption', 'f')]
)
grid = sns.FacetGrid(pd.DataFrame(data), col='Phase')
grid = grid.map(plt.scatter, 'Reset Time', 'Consumption')
def consumption_phases(db):
# NOTE excludes sink node and resetting node
consumptions = db.execute('''
SELECT ps.phase,
SUM(consumption),
(ps.phase - 1) % 2 = 1
FROM consumption AS c
JOIN phases AS ps
ON c.phase = ps.phase AND c.expid = ps.expid
WHERE c.host != 'm3-200' AND c.host != 'm3-157'
GROUP BY ps.expid, ps.phase''')
def _format():
for p, c, m in consumptions:
yield phase_names[p-1], c, m
data = np.array(list(_format()),
dtype=[('phase', 'U3'), ('consumption', 'f'), ('reset', bool)])
return pd.DataFrame(data)
def plot_consumption_phases(db, args):
data = consumption_phases(db)
sns.boxplot(x='reset', y='consumption', hue='phase', hue_order=phase_names, data=data, palette=tubspalette)
def consumption_nodes(db):
consumptions_nodes = db.execute('''
SELECT p.phase, c.host, AVG(c.consumption)
FROM consumption AS c
JOIN phases AS p
ON p.phase = c.phase AND p.expid = c.expid
GROUP BY c.host, p.phase
''')
def _format():
for p, h, c in consumptions_nodes:
host_x, host_y = node_positions[h]
yield phase_names[p-1], int(h[3:]), host_x, host_y, c, ((p - 1) % 2 == 1)
dtypes = [
('phase', 'U3'),
('host', 'd'),
('pos_x', 'd'),
('pos_y', 'd'),
('consumption', 'f'),
('reset', bool)
]
data = np.array(list(_format()), dtype=dtypes)
return pd.DataFrame(data)
def plot_nodes_consumption(db, args):
def phase_heatmap(x, y, val, **kwargs):
data = kwargs.pop('data')
d = data.pivot(index=x, columns=y, values=val)
print(d)
hostnames = data.pivot(index=x, columns=y, values='host')
        hostnames = hostnames.fillna(0)
print(hostnames)
ax = sns.heatmap(d, annot=hostnames, fmt='.0f', **kwargs)
ax.invert_yaxis()
#ax.invert_xaxis()
data = consumption_nodes(db)
fgrid = sns.FacetGrid(data, col='phase', col_wrap=3, col_order=['N', 'H', 'HS', 'R', 'HR', 'HSR'])
fgrid = fgrid.map_dataframe(phase_heatmap, 'pos_y', 'pos_x',
'consumption', cbar=False, square=False,
cmap=sns.light_palette('#711c2f'))
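# Illustrative sketch (toy values, hypothetical helper): how phase_heatmap
# above arranges the testbed grid, pivoting the pos_x/pos_y coordinates from
# node_positions into a matrix of consumption values for sns.heatmap.
def _example_grid_pivot():
    toy = pd.DataFrame({'pos_x': [0, 0, 1], 'pos_y': [0, 1, 0],
                        'consumption': [1.0, 2.0, 3.0]})
    # one row per node position; missing grid cells stay NaN in the pivot
    return toy.pivot(index='pos_y', columns='pos_x', values='consumption')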
def rank_neighbors_consumption(db):
rnc = db.execute('''
SELECT n.phase, n.host, n.avg_rank, n.avg_neighbors, c.consumption
FROM dag_nodes AS n
JOIN consumption AS c ON c.expid = n.expid AND c.phase = n.phase
JOIN phases AS p ON p.expid = c.expid AND p.phase = c.phase
GROUP BY n.expid, n.phase, n.host
''')
def _format():
for p, h, r, n, c in rnc:
yield phase_names[p-1], h[3:], r, n, c
data = np.array(list(_format()), dtype=[('phase', 'U3'), ('host', 'U10'),
('rank', 'f'), ('neighbors', 'f'),
('consumption', 'f')])
return pd.DataFrame(data)
def plot_rank_neighbors_consumption(db, args):
data = rank_neighbors_consumption(db)
#sns.pairplot(data,
# hue='phase',
# kind='reg',
# diag_kind='hist',
# vars=['rank', 'consumption'],
# markers="+",
# palette=tubspalette)
sns.lmplot(x="rank", y="consumption", hue="phase",
truncate=True, data=data, markers='+')
def consumption_hosts(db, hosts):
qms = '?' * len(hosts)
query = 'SELECT c.phase, c.host, c.consumption FROM consumption AS c JOIN phases AS p ON p.phase = c.phase AND p.expid = c.expid WHERE c.host IN ({})'.format(','.join(qms))
csh = db.execute(query, hosts)
def _format():
for p, h, c in csh:
yield phase_names[p-1], h[3:], c
data = np.array(list(_format()),
dtype=[('phase', 'U3'), ('host', 'U10'), ('consumption', 'f')])
return pd.DataFrame(data)
def plot_consumption_hosts(db, args):
hcons = consumption_hosts(db, args.hosts)
sns.boxplot(x='phase', y='consumption', hue='host', data=hcons)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process file names.')
subparsers = parser.add_subparsers(help='subcommand')
phases_cmd = subparsers.add_parser('phases')
phases_cmd.add_argument('--database', '-d', type=str, nargs=1, help='a sqlite3 database file', default=['db.sqlite3'])
phases_cmd.add_argument('--out', '-o', type=str, nargs=1, help='output file for the plot')
phases_cmd.set_defaults(func=plot_consumption_phases)
nodes_cmd = subparsers.add_parser('nodes')
nodes_cmd.add_argument('--database', '-d', type=str, nargs=1, help='a sqlite3 database file', default=['db.sqlite3'])
nodes_cmd.add_argument('--out', '-o', type=str, nargs=1, help='output file for the plot')
nodes_cmd.set_defaults(func=plot_nodes_consumption)
rank_neighbors_cmd = subparsers.add_parser('regress')
rank_neighbors_cmd.add_argument('--database', '-d', type=str, nargs=1, help='a sqlite3 database file', default=['db.sqlite3'])
rank_neighbors_cmd.add_argument('--out', '-o', type=str, nargs=1, help='output file for the plot')
rank_neighbors_cmd.set_defaults(func=plot_rank_neighbors_consumption)
hosts_cmd = subparsers.add_parser('hosts')
hosts_cmd.add_argument('--database', '-d', type=str, nargs=1, help='a sqlite3 database file', default=['db.sqlite3'])
hosts_cmd.add_argument('--out', '-o', type=str, nargs=1, help='output file for the plot')
hosts_cmd.add_argument('hosts', type=str, nargs='+', help='hosts to plot')
hosts_cmd.set_defaults(func=plot_consumption_hosts)
plt.figure()
sns.set()
sns.set(font='NexusSerifPro')
sns.set_palette(tubspalette)
args = parser.parse_args()
db = data.init_db(args.database[0])
args.func(db, args)
plt.savefig(args.out[0])

95 tools/parselogs.py Executable file

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
import argparse
import rpl.data as p
import os
import cProfile
import pandas as pd
from sys import exit
from os.path import dirname, basename
def dumptable(db, tablename):
dump = db.execute("SELECT * FROM %s" % tablename)
for l in dump:
print(l)
def parselogs(args):
db = p.init_db(args.database[0])
expid = args.experiment[0]
if args.logs:
        for f in list(args.logs):
if 'm3-200' in f:
args.logs.remove(f)
print('Ignoring log file for resetting node %s' % 'm3-200')
try:
logs = [open(log, 'r') for log in args.logs]
p.parse_events(db, expid, logs)
except IOError:
print("Failed to open files")
phases = db.execute('''
SELECT phase, tstart, tstop
FROM phases
WHERE expid = ?
''', (expid, )).fetchall()
if args.serial:
with open(args.serial[0], 'r') as serial:
p.parse_addresses(db, expid, serial)
addresses = pd.DataFrame(list(db.execute('SELECT * FROM addresses')), columns=['host', 'address']).set_index('address')
with open(args.serial[0], 'r') as serial:
p.parse_end_to_end(db, expid, phases, addresses, serial)
with open(args.serial[0], 'r') as serial:
p.parse_count_messages(db, expid, phases, serial)
with open(args.serial[0], 'r') as serial:
p.parse_dag(db, expid, phases, serial)
with open(args.serial[0], 'r') as serial:
p.parse_parents(db, expid, phases, serial)
with open(args.serial[0], 'r') as serial:
p.parse_default_routes(db, expid, phases, serial)
if args.consumption:
for log in args.consumption:
hostname = basename(log).split('.')[0]
with open(log, 'r') as oml:
p.parse_consumption(db, expid, phases, hostname, oml)
db.commit()
#dumptable(db, 'addresses')
#dumptable(db, 'phases')
#dumptable(db, 'consumption')
#dumptable(db, 'dag_nodes')
#dumptable(db, 'dag_edges')
#dumptable(db, 'resets')
#dumptable(db, 'end_to_end')
#dumptable(db, 'dag_evolution')
#dumptable(db, 'default_route_changes')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process file names.')
parser.add_argument('experiment', nargs=1, help='experiment id')
parser.add_argument('--database', '-b', type=str, nargs=1, help='a sqlite3 database file', default=[':memory:'])
parser.add_argument('--serial', '-s', type=str, nargs=1, help='a serial log file (ASCII)')
parser.add_argument('--sniffer', '-n', type=str, nargs='+', help='a sniffer log file (PCAP)')
parser.add_argument('--consumption', '-c', type=str, nargs='+', help='a list of consumption logs (OML)')
parser.add_argument('--logs', '-l', type=str, nargs='+', help='a list of experiment logs (OML)')
parser.add_argument('--radio', '-r', type=str, nargs='+', help='a list of RSSI logs (OML)')
args = parser.parse_args()
parselogs(args)

205 tools/performance.py Executable file

@@ -0,0 +1,205 @@
#!/usr/bin/env python
import argparse
import rpl.data as p
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
from os.path import dirname, basename
tubspalette = ['#be1e3c', '#ffc82a', '#e16d00', '#711c2f', '#acc13a', '#6d8300', '#00534a', '#66b4d3', '#007a9b', '#003f57', '#8a307f', '#511246', '#4c1830']
phase_names = ['N', 'R', 'H', 'HR', 'HS', 'HSR']
versions = ['Contiki', 'Hardened', 'Hardened with UIDs']
def plot_consumption(db, args):
data = db.execute(
'''
SELECT c.phase, SUM(consumption), SUM(dios), SUM(daos), SUM(dis)
FROM overhead AS o
JOIN consumption AS c ON c.expid = o.expid AND c.phase = o.phase AND c.host = o.source
GROUP BY o.expid, o.phase
''')
data = pd.DataFrame(data.fetchall(), columns=['phase', 'consumption', 'dios', 'daos', 'dis'])
data['phase'] = data['phase'].map({1: 'N', 2: 'R', 3: 'H', 4: 'HR', 5: 'HS', 6: 'HSR'})
sns.pairplot(data, hue='phase', x_vars=['dios', 'daos', 'dis'], y_vars=['consumption'], kind='reg', markers='.')
def plot_overhead(db, args):
overheads = db.execute(
'''
SELECT phase, SUM(dios), SUM(daos), SUM(dis), (phase - 1) % 2 == 1
FROM overhead
WHERE source != 'm3-200'
GROUP BY expid, phase
''')
data = pd.DataFrame(overheads.fetchall(), columns=['phase', 'dios', 'daos', 'dis', 'reset'])
dios = data[['phase', 'reset', 'dios']]
dios['message type'] = 'dio'
dios = dios.rename(index=str, columns={'dios': 'count'})
daos = data[['phase', 'reset', 'daos']]
daos['message type'] = 'dao'
daos = daos.rename(index=str, columns={'daos': 'count'})
    dis = data[['phase', 'reset', 'dis']]
dis['message type'] = 'dis'
dis = dis.rename(index=str, columns={'dis': 'count'})
frame = [dios, daos, dis]
data = pd.concat(frame)
data['phase'] = data['phase'].map({1: 'N', 2: 'R', 3: 'H', 4: 'HR', 5: 'HS', 6: 'HSR'})
print(data)
sns.barplot(data=data, hue='phase', y='message type', x='count')
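# Illustrative sketch (toy counts, hypothetical helper): the wide-to-long
# reshape used in plot_overhead above, turning the per-phase dio/dao/dis
# columns into a single 'count' column keyed by message type.
def _example_overhead_long_format():
    wide = pd.DataFrame({'phase': [1], 'reset': [False],
                         'dios': [10], 'daos': [4], 'dis': [1]})
    parts = []
    for col, name in [('dios', 'dio'), ('daos', 'dao'), ('dis', 'dis')]:
        part = wide[['phase', 'reset', col]].rename(columns={col: 'count'})
        part['message type'] = name
        parts.append(part)
    return pd.concat(parts)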
def plot_packet_loss(db, args):
changes_vs_delay = db.execute('''
WITH s AS (
SELECT expid, phase, source, COUNT(nexthop) AS stab
FROM default_route_changes
GROUP BY expid, phase, source)
SELECT e.phase, s.stab / (tstop - tstart), loss, (e.phase - 1) / 2
FROM end_to_end AS e
JOIN s ON s.expid = e.expid AND e.phase = s.phase AND s.source = e.source
JOIN phases AS p ON p.expid = e.expid AND p.phase = e.phase
''')
data = pd.DataFrame(changes_vs_delay.fetchall(), columns=['phase', 'changes', 'loss', 'firmware'])
data = data.replace([np.inf, -np.inf], np.nan).dropna()
g = sns.pairplot(data, diag_kind='kde', kind='reg', hue='phase', vars=['changes', 'loss'])
def plot_rank_loss(db, args):
rank_vs_loss = db.execute(
'''
SELECT de.phase, loss, avg_metric, avg_rank
FROM dag_edges AS de
JOIN end_to_end AS e2e ON e2e.expid = de.expid AND e2e.phase = de.phase AND e2e.source = de.source
GROUP BY de.expid, de.phase
''')
data = pd.DataFrame(rank_vs_loss.fetchall(), columns=['phase', 'loss', 'metric', 'rank'])
data = data.replace([np.inf, -np.inf], np.nan).dropna()
g = sns.pairplot(data, diag_kind='hist', kind='reg', hue='phase', vars=['loss', 'metric', 'rank'])
def plot_delay(db, args):
data = db.execute(
'''
SELECT phase, delay
FROM end_to_end
WHERE source != 'm3-200'
GROUP BY expid, phase
''')
data = pd.DataFrame(data.fetchall(), columns=['phase', 'delay'])
data = data.replace([np.inf, -np.inf], np.nan).dropna()
for phase in range(1,7):
pdata = data[data['phase'] == phase]
ax = sns.distplot(pdata[['delay']], hist=False, label=phase_names[phase-1])
ax.set_xlabel('delay')
ax.legend()
def plot_jitter(db, args):
data = db.execute(
'''
SELECT phase, jitter
FROM end_to_end
WHERE source != 'm3-200'
GROUP BY expid, phase
''')
data = pd.DataFrame(data.fetchall(), columns=['phase', 'jitter'])
data = data.replace([np.inf, -np.inf], np.nan).dropna()
for phase in range(1,7):
pdata = data[data['phase'] == phase]
ax = sns.distplot(pdata[['jitter']], hist=False, label=phase_names[phase-1])
ax.set_xlabel('jitter')
ax.legend()
def plot_loss(db, args):
data = db.execute(
'''
SELECT phase, loss
FROM end_to_end
WHERE source != 'm3-200'
GROUP BY expid, phase
''')
data = pd.DataFrame(data.fetchall(), columns=['phase', 'loss'])
data = data.replace([np.inf, -np.inf], np.nan).dropna()
for phase in range(1,7):
pdata = data[data['phase'] == phase]
ax = sns.distplot(pdata[['loss']], hist=False, label=phase_names[phase-1])
ax.set_xlabel('loss')
ax.legend()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process file names.')
subp = parser.add_subparsers(help='command')
# overhead (dio,dao,dis) vs. phase
overhead = subp.add_parser('overhead')
overhead.set_defaults(func=plot_overhead)
overhead.add_argument('--database', '-d', nargs=1)
overhead.add_argument('--output' , '-o', nargs=1)
consumption = subp.add_parser('consumption')
consumption.set_defaults(func=plot_consumption)
consumption.add_argument('--database', '-d', nargs=1)
consumption.add_argument('--output' , '-o', nargs=1)
loss = subp.add_parser('loss')
loss.set_defaults(func=plot_loss)
loss.add_argument('--database', '-d', nargs=1)
loss.add_argument('--output', '-o', nargs=1)
delay = subp.add_parser('delay')
delay.set_defaults(func=plot_delay)
delay.add_argument('--database', '-d', nargs=1)
delay.add_argument('--output', '-o', nargs=1)
jitter = subp.add_parser('jitter')
jitter.set_defaults(func=plot_jitter)
jitter.add_argument('--database', '-d', nargs=1)
jitter.add_argument('--output', '-o', nargs=1)
args = parser.parse_args()
db = p.init_db(args.database[0])
plt.figure()
sns.set()
sns.set(font='NexusSerifPro')
sns.set_palette(tubspalette)
args.func(db, args)
plt.savefig(args.output[0])

25 tools/requirements Normal file

@@ -0,0 +1,25 @@
certifi==2017.11.5
chardet==3.0.4
cycler==0.10.0
decorator==4.1.2
idna==2.6
iotlabcli==2.5.2
jmespath==0.9.3
matplotlib==2.1.1
networkx==2.0
numpy==1.14.0
oml-plot-tools==0.6.0
parse==1.8.2
Pillow==5.0.0
pluggy==0.5.2
py==1.5.2
pygraphviz==1.3.1
pyparsing==2.2.0
python-dateutil==2.6.1
pytz==2017.3
requests==2.18.4
six==1.11.0
test==2.3.4.5
tox==2.9.1
urllib3==1.22
virtualenv==15.1.0

0 tools/rpl/__init__.py Normal file

37 tools/rpl/analysis.py Normal file

@@ -0,0 +1,37 @@
#!/usr/bin/env python
import json
import numpy
from sys import argv
from datetime import time, datetime
def node_name(node_name):
return node_name.split('.')[0]
CPU_M3 = { 'current': 0.050, 'voltage': 3.3 }
RADIO_M3_SLEEP = { 'current': 0.00002, 'voltage': 3.3 }
RADIO_M3_TRX_OFF = { 'current': 0.0004, 'voltage': 3.3 }
RADIO_M3_RX_ON = { 'current': 0.0103, 'voltage': 3.3 }
""" TODO not included in the data sheet, only values for +3dBm, 0dBm and -17dBm,
so more a guess based on what the CN measured
"""
RADIO_M3_BUSY_TX = { 'current': 0.010, 'voltage': 3.3 }
RTIME_SECOND_M3 = 1
POWERTRACE_INTERVAL = 1
def consumption(energest, current, voltage):
"""mW"""
return energest * current * voltage / (RTIME_SECOND_M3 * POWERTRACE_INTERVAL)
def duty_cycle(tx, rx, cpu, lpm):
    """%"""
    return (tx + rx) / (cpu + lpm)
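# Illustrative sketch (assumed tick counts, not measured data): with
# RTIME_SECOND_M3 and POWERTRACE_INTERVAL both 1, consumption() reduces to
# energest * current * voltage, and duty_cycle() is the share of radio-on
# time within the total of CPU and LPM ticks.
def _example_consumption():
    # 500 ticks of RX: 500 * 0.0103 * 3.3 = 16.995
    rx = consumption(500, RADIO_M3_RX_ON['current'], RADIO_M3_RX_ON['voltage'])
    # (100 + 200) / (400 + 600) = 0.3 radio duty cycle
    dc = duty_cycle(tx=100.0, rx=200.0, cpu=400.0, lpm=600.0)
    return rx, dc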

669 tools/rpl/data.py Normal file

@@ -0,0 +1,669 @@
import json
import sqlite3
import numpy as np
import pandas as pd
from os.path import basename
from datetime import time, datetime
from parse import parse, compile
from itertools import groupby
dio_parser = compile("{:f};{};DIO\n")
dao_parser = compile("{:f};{};DAO\n")
dis_parser = compile("{:f};{};DIS\n")
node_start_parser = compile("{:f};{};GO!\n")
event_parser = compile("{timestamp} :: {type} :: {text}\n")
consumption_parser = compile("{timestamp:f}\t{key:d}\t{id:d}\t{seconds}\t{subseconds}\t{power:f}\t{voltage:f}\t{current:f}\n")
power_parser = compile("{timestamp:f}\t{key:d}\t{id:d}\t{seconds}\t{subseconds}\t{power:f}\t{voltage}\t{current}\n")
radio_parser = compile("{timestamp:f}\t{key:d}\t{id:d}\t{seconds:d}\t{subseconds:d}\t{channel:d}\t{rssi:d}\n")
address_parser = compile("{timestamp:f};{host};ADDR;{address}\n")
neighbor_parser = compile("{timestamp:f};{host};NEIGHBOR;{address};{isrouter:d};{state:d}\n")
default_route_parser = compile("{timestamp:f};{host};DEFAULT;{address};{lifetime:d};{infinite:d}\n")
route_parser = compile("{timestamp:f};{host};ROUTE;{address};{nexthop};{lifetime:d};{dao_seqno_out:d};{dao_seqno_in:d}\n")
dag_parser = compile("{timestamp:f};{host};DAG;{mop};{ocp};{rank:d};{interval:d};{neighbor_count:d}\n")
parent_parser = compile("{timestamp:f};{host};PARENT;{address};{rank:d};{metric:d};{rank_via_parent:d};{freshness:d};{isfresh:d};{preferred:d};{last_tx:d}\n")
powertrace_parser = compile("{timestamp:f};{host};P;{cpu:d};{lpm:d};{tx:d};{tx_idle:d};{rx:d};{rx_idle:d}\n")
spowertrace_parser = compile("{timestamp:f};{host};SP;{channel:d};{inputs:d};{tx_in:d};{rx_in:d};{outputs:d};{tx_out:d};{rx_out:d}\n")
payload_parser = compile("{:f};{};DATA;{};{};{:d}\n")
def init_db(dbpath):
db = sqlite3.connect(dbpath)
db.execute('''
CREATE TABLE IF NOT EXISTS phases (
expid INTEGER,
phase INTEGER,
tstart FLOAT,
tstop FLOAT,
PRIMARY KEY (expid, phase)
)
''')
# assume static addresses...
db.execute('''
CREATE TABLE IF NOT EXISTS addresses (
host TEXT,
address TEXT,
PRIMARY KEY (address)
)''')
db.execute('''
CREATE TABLE IF NOT EXISTS consumption (
expid INTEGER,
phase INTEGER,
host TEXT,
consumption FLOAT,
FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
PRIMARY KEY (expid, phase, host)
)
''')
db.execute('''
CREATE TABLE IF NOT EXISTS default_route_changes (
expid INTEGER,
phase INTEGER,
source TEXT,
nexthop TEXT,
tchange FLOAT,
            FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
FOREIGN KEY (source) REFERENCES addresses(address),
FOREIGN KEY (nexthop) REFERENCES addresses(address),
PRIMARY KEY (expid, phase, source, nexthop, tchange)
)
''')
db.execute('''
CREATE TABLE IF NOT EXISTS dag_nodes (
expid INTEGER,
phase INTEGER,
host TEXT,
mop BYTE,
ocp BYTE,
avg_rank FLOAT,
avg_neighbors FLOAT,
FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
PRIMARY KEY (expid, phase, host)
)
''')
db.execute('''
CREATE TABLE IF NOT EXISTS dag_edges (
expid INTEGER,
phase INTEGER,
source TEXT,
destination TEXT,
avg_rank FLOAT,
avg_metric FLOAT,
count_preferred INTEGER,
FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
FOREIGN KEY (expid, phase, source) REFERENCES dag_nodes(expid, phase, host),
FOREIGN KEY (expid, phase, destination) REFERENCES dag_nodes(expid, phase, host),
PRIMARY KEY (expid, phase, source, destination)
)
''')
db.execute('''
CREATE TABLE IF NOT EXISTS dag_evolution (
expid INTEGER,
phase INTEGER,
tchange FLOAT,
host TEXT,
parent TEXT,
FOREIGN KEY (expid, phase, host, parent) REFERENCES dag_edges(expid, phase, source, destination),
PRIMARY KEY (expid, phase, tchange, host)
)
''')
db.execute('''
CREATE TABLE IF NOT EXISTS end_to_end (
expid INTEGER,
phase INTEGER,
source TEXT,
delay FLOAT,
jitter FLOAT,
loss FLOAT,
FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
FOREIGN KEY (expid, phase, source) REFERENCES dag_nodes(expid, phase, host),
PRIMARY KEY (expid, phase, source)
)''')
db.execute('''
CREATE TABLE IF NOT EXISTS overhead (
expid INTEGER,
phase INTEGER,
source INTEGER,
dios INTEGER,
daos INTEGER,
dis INTEGER,
FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
FOREIGN KEY (expid, phase, source) REFERENCES dag_nodes(expid, phase, host),
PRIMARY KEY (expid, phase, source))
''')
db.execute('''
CREATE TABLE IF NOT EXISTS resets (
expid INTEGER,
phase INTEGER,
host TEXT,
timestamp FLOAT,
FOREIGN KEY (expid, phase) REFERENCES phases(expid, phase),
PRIMARY KEY (expid, phase)
)
''')
return db
def find_phase(phases, timestamp):
for name, start, stop in phases:
if start <= timestamp and timestamp < stop:
return name
def __process_consumptions(phases, consumptions):
def _format():
for line in consumptions:
timestamp = float(line['seconds'] + '.' + line['subseconds'])
yield timestamp, line['power']
consum = np.array(list(_format()), dtype=[('timestamp', 'f'), ('power', 'f')])
data = pd.pivot_table(pd.DataFrame(consum), values='power', index='timestamp')
for name, start, stop in phases:
vals = data.loc[start:stop]
mean = vals['power'].mean()
yield name, float(mean)
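# Illustrative sketch (toy samples, hypothetical helper): the pivot-and-slice
# averaging used by __process_consumptions above, reduced to one phase window.
def _example_phase_average():
    toy = pd.DataFrame({'timestamp': [0.5, 1.0, 1.5, 2.5],
                        'power': [0.10, 0.20, 0.30, 0.40]})
    table = pd.pivot_table(toy, values='power', index='timestamp')
    # keep the samples whose timestamp falls into the phase window [0.0, 2.0]
    return float(table.loc[0.0:2.0]['power'].mean())  # (0.10 + 0.20 + 0.30) / 3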
def __store_consumption(db, expid, host, consumptions):
def _format():
for phase, consumption in consumptions:
yield expid, phase, host, consumption
db.executemany(
'''INSERT OR REPLACE INTO consumption VALUES (?,?,?,?)''',
_format()
)
def __run_parser(log, parser):
for line in log:
res = parser.parse(line)
# skip invalid lines
if not res:
continue
else:
yield res.named
def __process_addresses(db, expid, addresses):
def _format():
for addr in addresses:
yield addr['host'], addr['address']
db.executemany('''INSERT OR REPLACE INTO addresses VALUES (?,?)''', _format())
def __process_phases(db, expid, phases):
def _format():
i = 1
for start, stop in phases:
yield expid, i, float(start), float(stop)
i += 1
db.executemany('''INSERT OR REPLACE INTO phases VALUES (?,?,?,?)''', _format())
def __parse_weird_iso(something):
return datetime.strptime(something, '%Y-%m-%d %H:%M:%S,%f').timestamp()
def __phases_from_events(logs, phase_len=600):
flash = 'Flash firmware on open node'
pstop = 'Open power stop'
pstart = 'Open power start'
phases = pd.DataFrame()
def parse_node_log(log):
for event in __run_parser(log, event_parser):
if event['type'] == 'INFO':
timestamp = __parse_weird_iso(event['timestamp'])
if flash in event['text'] or pstart in event['text']:
yield timestamp
for log in logs:
phases.reset_index(drop=True)
ts = pd.DataFrame(parse_node_log(log), columns=[log.name]).reset_index(drop=True)
phases = phases.reset_index(drop=True)
phases[log.name] = ts
phases['min'] = phases.min(axis=1)
for m in phases['min']:
yield m, m + phase_len
def parse_events(db, expid, logs):
__process_phases(db, expid, __phases_from_events(logs))
def parse_consumption(db, expid, phases, host, log):
__store_consumption(db, expid, host, __process_consumptions(phases, __run_parser(log, power_parser)))
def parse_addresses(db, expid, log):
__process_addresses(db, expid, __run_parser(log, address_parser))
def __process_dag(phases, dags):
ranks, neighbors = dict(), dict()
phase = 0
for p in dags:
timestamp, host, rank, neighbor_c = p['timestamp'], p['host'], p['rank'], p['neighbor_count']
name, _, stop = phases[phase]
if timestamp > stop:
for host in ranks:
yield phase+1, host, p['mop'], p['ocp'], np.average(ranks[host]), np.average(neighbors[host])
ranks, neighbors = dict(), dict()
phase += 1
# check if no next phase
if phase >= len(phases):
break
name, start, stop = phases[phase]
# init lists
if not host in ranks:
ranks[host] = []
neighbors[host] = []
# check if within phase
if start <= timestamp and stop > timestamp:
ranks[host].append(rank)
neighbors[host].append(neighbor_c)
for host in ranks:
yield phase+1, host, p['mop'], p['ocp'], np.average(ranks[host]), np.average(neighbors[host])
def __store_dag_nodes(db, expid, dag_nodes):
def _format():
for n in dag_nodes:
yield (expid, ) + n
db.executemany('''INSERT OR REPLACE INTO dag_nodes VALUES (?,?,?,?,?,?,?)''', _format())
def parse_dag(db, expid, phases, log):
__store_dag_nodes(db, expid, __process_dag(phases, __run_parser(log, dag_parser)))
def __process_parents(phases, parents):
ranks, metrics, prefs = dict(), dict(), dict()
phase = 0
for p in parents:
timestamp, source, dest, rank, metric, rvp, pref = p['timestamp'], p['host'], p['address'], p['rank'], p['metric'], p['rank_via_parent'], p['preferred']
key = (source, dest)
name, _, stop = phases[phase]
if timestamp > stop:
for s, d in ranks:
yield name, s, d, np.average(ranks[(s, d)]), np.average(metrics[(s, d)]), prefs[(s, d)]
ranks, metrics, prefs = dict(), dict(), dict()
phase += 1
if not key in ranks:
ranks[key] = []
metrics[key] = []
prefs[key] = 0
ranks[key].append(rank)
metrics[key].append(metric)
prefs[key] += pref
if phase == len(phases):
break
for s, d in ranks:
yield phase+1, s, d, np.average(ranks[(s, d)]), np.average(metrics[(s, d)]), prefs[(s, d)]
def __store_dag_edges(db, expid, dag_edges):
def _format():
for phase, host, dest, rank, metric, pref in dag_edges:
yield expid, phase, host, rank, metric, pref, dest
db.executemany('''
INSERT OR REPLACE INTO dag_edges
SELECT ?, ?, ?, host, ?, ?, ?
FROM addresses
WHERE address LIKE ?
''', _format())
def __process_dag_evolution(phases, parents):
prev_pref_parent = dict()
phase = 0
for p in parents:
def parent_changed():
if p['host'] in prev_pref_parent and prev_pref_parent[p['host']] == p['address']:
return False
else:
return p['preferred']
phasename, pstart, pstop = phases[phase]
if p['timestamp'] > pstop:
phase += 1
prev_pref_parent = dict()
if phase >= len(phases):
break
phasename, pstart, pstop = phases[phase]
if p['timestamp'] < pstop and p['timestamp'] >= pstart:
            # check if within the phase, i.e. not between phases
if parent_changed():
yield phasename, p['timestamp'], p['host'], p['address']
prev_pref_parent[p['host']] = p['address']
def __store_dag_evolution(db, expid, evolution):
def _format():
for phase, ts, s, d in evolution:
yield expid, phase, ts, s, d
db.executemany('''
INSERT OR REPLACE INTO dag_evolution
SELECT ?,?,?,?,host
FROM addresses
WHERE address LIKE ?''', _format())
def parse_parents(db, expid, phases, log):
__store_dag_edges(db, expid, __process_parents(phases, __run_parser(log, parent_parser)))
log.seek(0)
__store_dag_evolution(db, expid, __process_dag_evolution(phases, __run_parser(log, parent_parser)))
def __process_resets(events):
second_restart = False
    # filter every second restart of m3-200 as "reset"
for ev in events:
if 'Open power start' in ev['text']:
if second_restart:
second_restart = False
yield __parse_weird_iso(ev['timestamp'])
else:
second_restart = True
def __store_resets(db, expid, host, timestamps):
def _format():
for stamp in timestamps:
yield expid, host, stamp, stamp, stamp, expid
db.executemany('''
INSERT OR REPLACE INTO resets
SELECT ?, phase, ?, ?
FROM phases
WHERE tstart < (?)
AND (?) < tstop
AND expid = ?
''', _format())
def parse_resets(db, expid, host, log):
__store_resets(db, expid, host, __process_resets(__run_parser(log, event_parser)))
def __process_default_route_changes(phases, default_routes):
phase = 0
phase_last_def_rts = dict()
for rt in default_routes:
phase_name, _, pstop = phases[phase]
if rt['timestamp'] > pstop:
phase += 1
phase_last_def_rts = dict()
if phase >= len(phases):
break
def route_changed():
host = rt['host']
if not host in phase_last_def_rts:
return True
else:
return rt['address'] != phase_last_def_rts[host]
if route_changed():
yield phase_name, rt['timestamp'], rt['host'], rt['address'], rt['lifetime'], rt['infinite']
phase_last_def_rts[rt['host']] = rt['address']
def __store_default_route_changes(db, expid, droutes):
def _format():
for p, ts, h, addr, lt, i in droutes:
yield expid, p, h, ts, addr
db.executemany('''
INSERT OR REPLACE INTO default_route_changes
SELECT ?, ?, ?, host, ?
FROM addresses
WHERE address = ?''', _format())
def parse_default_routes(db, expid, phases, log):
__store_default_route_changes(db, expid, __process_default_route_changes(phases, __run_parser(log, default_route_parser)))
#def parse_routes(db, expid, log):
#
# __store_routes(db, expid, __process_routes(__run_parser(log, route_parser)))
#def __process_payloads(phases, payloads):
#
# # timestamp, seqnr, losses
# sends = dict()
# recvs = dict()
#
# for pl in payloads:
# src = pl['src']
# mtype = pl['type']
# seqnr = pl['seqnr']
# timestamp = pl['timestamp']
#
# if not sends[src]:
# sends[src] = []
# if not recvs[src]:
#def parse_payloads(db, expid, phases, log):
#
# __store_end_to_end(db, expid, __process_payloads(phases, __run_parser(log, payload_parser)))
#def __process_powertrace(phases, traces):
#
# energest = dict()
# phase = 0
#
# for trace in traces:
# newphase = find_phase(phases, parent['timestamp'])
# if newphase != phase:
# finalized = energest
# energest = dict()
#
# for host in finalized:
# data = finalized[host]
# yield phase, host, data['cpu'], np.average(data['ranks']), np.average(data['metrics']), data['preferred']
# phase = newphase
#
# host = parent['host']
# timestamp = parent['timestamp']
#
# if not host in hosts:
# hosts[host] = {
# 'destination': parent['address'],
# 'ranks': [],
# 'metrics': [],
# 'preferred': 0
# }
#
# hosts[host]['ranks'] += [parent['rank']]
# hosts[host]['metrics'] += [parent['metric']]
# hosts[host]['preferred'] += parent['preferred']
#def parse_powertrace(db, expid, host, log):
#
# __store_powertrace(db, expid, __process_powertrace(phases, __run_parser(log, powertrace_parser)))
def parse_count_messages(db, expid, phases, serial):
def __parse_messages():
for line in serial:
res = dio_parser.parse(line)
if res:
ts, host = res.fixed
yield 'dio', ts, host
res = dao_parser.parse(line)
if res:
ts, host = res.fixed
yield 'dao', ts, host
            res = dis_parser.parse(line)
if res:
ts, host = res.fixed
yield 'dis', ts, host
data = pd.DataFrame(__parse_messages(), columns=['type', 'ts', 'host'])
def __count_messages():
for phase, start, stop in phases:
msgs = data[(start <= data['ts']) & (data['ts'] < stop)]
msgs = msgs.groupby([msgs['host'], msgs['type']]).size()
for host, group in msgs.groupby('host'):
yield expid, phase, host, int(group.get((host,'dio'), default=0)), int(group.get((host,'dao'), default=0)), int(group.get((host,'dis'), default=0))
db.executemany(
'''INSERT OR REPLACE INTO overhead VALUES (?,?,?,?,?,?)''',
__count_messages()
)
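# Illustrative sketch (toy rows, hypothetical hosts): the per-host, per-type
# counting done by __count_messages above for a single phase.
def _example_count_messages():
    toy = pd.DataFrame({'type': ['dio', 'dio', 'dao'],
                        'ts': [1.0, 2.0, 3.0],
                        'host': ['m3-59', 'm3-59', 'm3-57']})
    counts = toy.groupby([toy['host'], toy['type']]).size()
    # missing combinations (here: no 'dis' at all) fall back to 0
    return int(counts.get(('m3-59', 'dio'), default=0))  # 2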
def parse_end_to_end(db, expid, phases, addresses, serial):
def __parse_messages():
for line in serial:
res = payload_parser.parse(line)
if res:
yield res.fixed
e2e = pd.DataFrame(__parse_messages(), columns=['timestamp', 'host', 'type', 'address', 'seqnr'])
# loss, delay, jitter
def __process_delay():
for phasename, start, stop in phases:
pe2e = e2e[(start <= e2e['timestamp']) & (e2e['timestamp'] < stop)]
send = pe2e[pe2e['type'] == 'send']
send = send.set_index(['host', 'seqnr'])
recv = pe2e[pe2e['type'] == 'recv']
recv = recv.join(addresses, lsuffix='_dest', on=['address'], how='inner')
recv = recv.set_index(['host', 'seqnr'])
pe2e = send.join(recv, rsuffix='_arrived', sort=False)
pe2e['delay'] = pe2e['timestamp_arrived'] - pe2e['timestamp']
for host, group in pe2e.groupby('host'):
delays = group['delay']
delay = delays.mean()
jitter = delays.var()
if delays.count() == 0:
print(host)
loss = delays.isnull().sum() / delays.count()
yield expid, phasename, host, delay, jitter, loss
db.executemany('''INSERT OR REPLACE INTO end_to_end VALUES (?,?,?,?,?,?)''', __process_delay())
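# Illustrative sketch (toy delays, hypothetical helper): the per-source
# statistics stored in end_to_end are the mean delay, the delay variance as
# jitter, and the ratio of unmatched sends (NaN delays) to received answers
# as loss, mirroring __process_delay above.
def _example_end_to_end_stats():
    delays = pd.Series([0.10, 0.20, np.nan, 0.30])  # one send never arrived
    delay = delays.mean()                           # 0.2
    jitter = delays.var()                           # 0.01
    loss = delays.isnull().sum() / delays.count()   # 1 lost vs. 3 delivered
    return delay, jitter, loss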
def parse_contiki_starts(db, expid, serial, exclude=['m3-200', 'm3-157'], cphases=6, phaselen=600):
def read_phases_from_log():
for line in serial:
res = node_start_parser.parse(line)
if res:
yield res.fixed
starts = pd.DataFrame(np.array(list(read_phases_from_log()), dtype=[('timestamp', 'f'), ('host', 'U10')]))
phases = pd.DataFrame()
for name, group in starts.groupby('host'):
if not name in exclude:
phases[name] = group['timestamp'].reset_index(drop=True).sort_values()
phases['min'] = phases.min(axis=1)
phases['max'] = phases.max(axis=1)
phases['diff'] = phases['max'] - phases['min']
print(phases.loc[:,'min':'diff'])
def _format():
phase = 0
for t in phases['min']:
phase += 1
yield expid, phase, t
#db.executemany(''' INSERT OR REPLACE INTO phases VALUES(?,?,?,?) ''', _format())
def phases(db):
return db.execute('''
SELECT phase, expid, tstart, tstop
FROM phases
ORDER BY phase, expid''')

117 tools/run.sh Executable file

@@ -0,0 +1,117 @@
#!/bin/bash
# experiments are kept like this:
# EXPID/{files}
# EXPID2/{files}
set -u
set -e
AUTHRC=${HOME}/.iotlabrc
RUSER=$(cat ${AUTHRC} | cut -d':' -f1)
# dummy
RHOST="localhost"
OLDEXPDIR=$(dirname ${2})
# find out the host for the site we are running the experiment on
RHOST=$(jq '.nodes[0]' ${2} | cut -d . -f2- | cut -d'"' -f-1)
# source hooks specific for experiment
if [ -f ${OLDEXPDIR}/hooks.sh ]; then
. ${OLDEXPDIR}/hooks.sh
else
# hook functions
pre() {
echo $1
}
during() {
echo $1
}
post() {
echo $1
}
fi
_firmwares() { # 1: experiment.json
FIRMWARENAMES=$(jq '.firmwareassociations[].firmwarename' ${1} | cut -d'"' -f2)
for f in ${FIRMWARENAMES}; do
echo -n " -l $(dirname ${1})/${f}"
done
}
_name() { # 1: experiment.json
    jq '.name' ${1} | cut -d '"' -f2
}
auth() {
xargs -n 1 auth-cli -u
}
track() { # 1: expdir
mkdir -p ${1}
(yes | ssh -l ${RUSER} ${RHOST} serial_aggregator > ${1}/serial.log 2> ${1}/serial_error.log) &
(ssh -n -l ${RUSER} ${RHOST} sniffer_aggregator -o - > ${1}/sniffer.pcap 2> ${1}/sniffer_error.log) &
}
load() { # 1: experiment.json
iotlab-experiment load -f ${1} $(_firmwares ${1}) | jq '.id'
}
save() { # 1: id
(
iotlab-experiment get -i ${1} -a
tar -xvf ${1}.tar.gz
)
scp -r ${RUSER}@${RHOST}:.iot-lab/${1} .
}
record() { # 1: old experiment json, 2: new expid
EXPID=$2
# monitor and control experiment
while iotlab-experiment wait -i ${EXPID} --state=Running --step 1 --timeout 36000 ; [ $? -ne 0 ]; do
echo ${EXPID} waiting
done
track ${EXPID} &
during ${OLDEXPDIR} ${EXPID} &
# wait until finished and execute post hook
iotlab-experiment wait -i ${EXPID} --state=Terminated --timeout 36000 --step 1
post ${EXPID}
# save experiment files from API
save ${EXPID}
# make copy of used hooks
#install ${PWD}/${OLDID}/hooks.sh ${PWD}/${EXPID}
}
run() { # 1: path to experiment.json
# hook for further preparations
pre ${OLDEXPDIR}
# load the experiment and get the ID of the new run
EXPID=$(load ${1})
#record ${1} ${EXPID}
}
if [ ! -f ${AUTHRC} ]; then
auth
fi
if [ ${1} == 'run' ]; then
run ${2}
fi
if [ ${1} == 'record' ]; then
record ${2} ${3}
fi
if [ ${1} == 'record-all' ]; then
for exp in $(iotlab-experiment get -l --state=Waiting | jq '.[][] | .id' | sort); do
record ${2} $exp
done
fi

283 tools/topology.py Executable file

@@ -0,0 +1,283 @@
#!/usr/bin/env python
import sqlite3
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from rpl import analysis as an, data as dat
from matplotlib import colors, cm
tubspalette = ['#be1e3c', '#ffc82a', '#e16d00', '#711c2f', '#acc13a', '#6d8300', '#00534a', '#66b4d3', '#007a9b', '#003f57', '#8a307f', '#511246', '#4c1830']
phase_names = ['N', 'R', 'H', 'HR', 'HS', 'HSR']
versions = ['Contiki', 'Hardened', 'Hardened with UIDs']
def network_dag_evolution(db, phase, expid, resolution):
"""For each slot of length resolution yields a graph containing the last
DAG during that time"""
phase_start, phase_stop = db.execute('''
SELECT tstart, tstop
FROM phases
WHERE phase = ? AND expid = ?''', (phase, expid)).fetchone()
phaselen = int(phase_stop - phase_start)
for t0 in range(int(phase_start), int(phase_stop), resolution):
edges = db.execute('''
WITH resolves AS (
SELECT MAX(tchange) AS t, host
FROM dag_evolution
WHERE tchange < ? AND phase = ? AND expid = ?
GROUP BY host
)
SELECT r.host, d.parent
FROM dag_evolution AS d
JOIN resolves AS r
ON d.host = r.host AND r.t == d.tchange
''', (t0 + resolution, phase, expid))
g = nx.DiGraph()
for src, dest in edges:
g.add_edge(src[3:], dest[3:])
yield g
def draw_dag(g, output):
nx.set_node_attributes(g, 'NexusSerifPro', name='fontname')
nx.set_node_attributes(g, '#e0f0f6', name='fillcolor')
nx.set_node_attributes(g, 'filled', name='style')
nx.set_edge_attributes(g, 'open', name='arrowhead')
a = nx.nx_agraph.to_agraph(g)
a.draw(output, prog='dot')
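# Illustrative sketch (hypothetical node ids and output path): rendering one
# snapshot from network_dag_evolution() with draw_dag(), the same pattern the
# dag() subcommand below uses (needs pygraphviz for nx_agraph).
def _example_draw_toy_dag():
    g = nx.DiGraph()
    g.add_edge('59', '157')   # m3-59 prefers m3-157 as parent
    g.add_edge('57', '157')
    draw_dag(g, 'toy-dag.pdf')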
def network_count_preferred(db):
edge_weights = db.execute('''
SELECT ps.phase, source, destination, AVG(count_preferred / (ps.tstop - ps.tstart))
FROM dag_edges AS de
JOIN phases AS ps
ON ps.phase = de.phase AND ps.expid = de.expid
AND count_preferred > 0
GROUP BY source, destination, de.phase''')
def _format():
for p, s, d, av in edge_weights:
yield phase_names[p-1], int(s[3:]), int(d[3:]), av
return np.array(list(_format()), dtype=[('phase', 'U3'), ('source', 'd'), ('next hop', 'd'), ('preferred', 'f')])
def plot_routes_heatmap(phasesroutes_with_weight):
def phase_heatmap(x, y, val, **kwargs):
data = kwargs.pop('data')
print(data)
d = data.pivot(index=x, columns=y, values=val)
print(d)
sns.heatmap(d, **kwargs)
df = pd.DataFrame(phasesroutes_with_weight)
fgrid = sns.FacetGrid(df, col='phase', col_wrap=3, col_order=['N', 'H', 'HS', 'R', 'HR', 'HSR'])
fgrid = fgrid.map_dataframe(phase_heatmap, 'source', 'next hop',
'preferred', annot=False, cbar=False, square=True,
cmap=sns.light_palette(tubspalette[0]))
fgrid.set_xticklabels([])
fgrid.set_yticklabels([])
def preferred_routes(db, args):
routes = network_count_preferred(db)
plot_routes_heatmap(routes)
def count_network_distinct_source(db, out):
counts = db.execute('''
select phase, COUNT(DISTINCT source), (phase - 1)/ 2 from dag_edges GROUP BY phase, expid ORDER BY phase
''')
def filter_bad_ones():
for phase, c, r in counts:
if phase < 7:
yield phase_names[(phase-1)], c, versions[int(r)]
data = np.array(list(filter_bad_ones()), dtype=[('Phase', 'U3'), ('Participating Nodes', 'd'), ('Version', 'U20')])
sns.swarmplot(y='Phase', x='Participating Nodes', hue='Version', data=pd.DataFrame(data))
def weighed_to_graphviz(g, root, cutoff=1):
nx.set_node_attributes(g, 'NexusSerifPro', name='fontname')
nx.set_node_attributes(g, '#aaaaff', name='fillcolor')
nx.set_node_attributes(g, 'filled', name='style')
nx.set_edge_attributes(g, 'open', name='arrowhead')
#root = g.nodes(Data=True)[root]
#root['fillcolor'] = '#ffaaaa'
deletelist = []
for x, y, data in g.edges(data=True):
if data['weight'] < cutoff:
deletelist.append((x, y))
for x, y in deletelist:
g.remove_edge(x, y)
weights = list(nx.get_edge_attributes(g, 'weight').values())
for x, y, data in g.edges(data=True):
#data['xlabel'] = data['weight']
data['penwidth'] = 10 * data['weight'] / max(weights)
data['arrowhead'] = 'normal'
data['dir'] = 'forward'
return nx.nx_agraph.to_agraph(g)
#def draw_all_topologies(db, root, cutoff=1):
# for phase, _, _, _ in dat.phases(db):
# g = network_count_preferred(db, phase)
# a = weighed_to_graphviz(g, root, cutoff)
# a.draw('graph-%s.pdf' % phase, prog='dot')
def dag(db, args, fileformat='pdf'):
step = 0
for exp in args.experiments:
for g in network_dag_evolution(db, args.phase, exp, args.resolution[0]):
step += args.resolution[0]
draw_dag(g, '%d-%d-%d.%s' % (args.phase, exp, step, fileformat))
def default_route_changes(db, args):
count_changes = db.execute('''
WITH change_src AS (
SELECT expid, phase, source, COUNT(nexthop) cnt
FROM default_route_changes
GROUP BY expid, phase, source)
SELECT phase, SUM(cnt)
FROM change_src AS csr
GROUP BY expid, phase''')
def _format():
for p, c in count_changes:
yield phase_names[p-1], c, phase_names[int((p-1)/2) * 2], (p - 1) % 2 == 1
data = pd.DataFrame(_format(), columns=['phase', 'changes', 'firmware', 'reset'])
sns.barplot(x='firmware', y='changes', hue='reset', data=pd.DataFrame(data))
def dag_convergence_times(db):
return db.execute('''
WITH firstpp AS (
SELECT ph.phase, ph.expid, host, MIN(tchange - ph.tstart) AS t
FROM dag_evolution AS de
JOIN phases AS ph
ON de.phase = ph.phase AND de.expid = ph.expid
GROUP BY ph.expid, ph.phase, host
)
SELECT phase, MAX(firstpp.t)
FROM firstpp
GROUP BY expid, phase''')
def plot_dag_convergence_times(db, args):
convtimes = dag_convergence_times(db)
def _format():
for p, t in convtimes:
yield phase_names[p-1], t, (p - 1) % 2 == 1
data = np.array(list(_format()), dtype=[('phase', 'U3'), ('convergence time', 'f'), ('reset', bool)])
print(data)
sns.barplot(y='reset', x='convergence time', hue='phase', data=pd.DataFrame(data))
def plot_rank_changes(db, args):
rank_vs_changes = db.execute(
'''
WITH s AS (
SELECT expid, phase, source, COUNT(nexthop) AS stab
FROM default_route_changes
GROUP BY expid, phase, source)
SELECT s.phase, avg_rank, avg_neighbors, s.stab
FROM dag_nodes AS n
JOIN s ON s.expid = n.expid AND s.phase = n.phase AND s.source = n.host
''')
data = pd.DataFrame(rank_vs_changes.fetchall(), columns=['phase', 'rank', 'neighbors', 'changes'])
data = data.replace([np.inf, -np.inf], np.nan).dropna()
g = sns.pairplot(data, diag_kind='kde', kind='reg', hue='phase', vars=['rank', 'neighbors', 'changes'])
print(data)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bla')
subparsers = parser.add_subparsers(help='subcommand')
dag_cmd_parser = subparsers.add_parser('dag')
dag_cmd_parser.set_defaults(func=dag)
dag_cmd_parser.add_argument('phase', type=int)
dag_cmd_parser.add_argument('resolution', nargs=1, type=int)
dag_cmd_parser.add_argument('experiments', nargs='+', type=int)
dag_cmd_parser.add_argument('--database', '-d', nargs=1, help='database file')
default_route_cmd = subparsers.add_parser('stability')
default_route_cmd.set_defaults(func=default_route_changes)
default_route_cmd.add_argument('--output', '-o', nargs=1, type=str)
default_route_cmd.add_argument('--database', '-d', nargs=1, help='database file')
route_selection_hm = subparsers.add_parser('routes')
route_selection_hm.set_defaults(func=preferred_routes)
route_selection_hm.add_argument('--output', '-o', nargs=1, type=str)
route_selection_hm.add_argument('--database', '-d', nargs=1, help='database file')
convergence_time = subparsers.add_parser('convergence')
convergence_time.set_defaults(func=plot_dag_convergence_times)
convergence_time.add_argument('--output', '-o', nargs=1, type=str)
convergence_time.add_argument('--database', '-d', nargs=1, help='database file')
rank_changes = subparsers.add_parser('changes')
rank_changes.set_defaults(func=plot_rank_changes)
rank_changes.add_argument('--database', '-d', nargs=1)
rank_changes.add_argument('--output', '-o', nargs=1)
args = parser.parse_args()
db = dat.init_db(args.database[0])
#draw_all_topologies(db, 'm3-157', 1)
#count_network_distinct_source(db, 'distinct_sources.pdf')
plt.figure()
sns.set()
sns.set(font='NexusSerifPro')
sns.set_palette(tubspalette)
args.func(db, args)
if args.func in [plot_dag_convergence_times, preferred_routes, default_route_changes]:
plt.savefig(args.output[0])