Merge pull request #4 from sparky8512/working

Mostly cosmetic cleanups and module reorganization
sparky8512 2021-01-06 11:59:24 -08:00 committed by GitHub
commit 96b7634a3d
8 changed files with 667 additions and 388 deletions

View file

@@ -12,7 +12,7 @@ import spacex.api.device.device_pb2_grpc
 # Note that if you remove the 'with' clause here, you need to separately
 # call channel.close() when you're done with the gRPC connection.
-with grpc.insecure_channel('192.168.100.1:9200') as channel:
+with grpc.insecure_channel("192.168.100.1:9200") as channel:
     stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
     response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
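
As the comment in this hunk notes, dropping the 'with' clause means the
channel must be closed explicitly. A minimal sketch of that equivalent form
(illustrative only, not code from this commit):

import grpc
import spacex.api.device.device_pb2
import spacex.api.device.device_pb2_grpc

channel = grpc.insecure_channel("192.168.100.1:9200")
try:
    stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
    response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
finally:
    # Matches the note above: no 'with' clause, so close the channel manually.
    channel.close()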

View file

@@ -2,7 +2,7 @@
 ######################################################################
 #
 # Equivalent script to parseJsonHistory.py, except integrating the
-# gRPC calls, instead of relying on separatate invocation of grpcurl.
+# gRPC calls, instead of relying on separate invocation of grpcurl.
 #
 # This script examines the most recent samples from the history data
 # and computes several different metrics related to packet loss. By
@@ -10,178 +10,103 @@
 #
 ######################################################################
-import grpc
-import spacex.api.device.device_pb2
-import spacex.api.device.device_pb2_grpc
 import datetime
 import sys
 import getopt
-from itertools import chain

-fArgError = False
+import starlink_grpc
+
+arg_error = False

 try:
     opts, args = getopt.getopt(sys.argv[1:], "ahrs:vH")
 except getopt.GetoptError as err:
     print(str(err))
-    fArgError = True
+    arg_error = True

 # Default to 1 hour worth of data samples.
-parseSamples = 3600
-fUsage = False
-fVerbose = False
-fParseAll = False
-fHeader = False
-fRunLengths = False
+samples_default = 3600
+samples = samples_default
+print_usage = False
+verbose = False
+parse_all = False
+print_header = False
+run_lengths = False

-if not fArgError:
+if not arg_error:
     if len(args) > 0:
-        fArgError = True
+        arg_error = True
     else:
         for opt, arg in opts:
             if opt == "-a":
-                fParseAll = True
+                parse_all = True
             elif opt == "-h":
-                fUsage = True
+                print_usage = True
             elif opt == "-r":
-                fRunLengths = True
+                run_lengths = True
             elif opt == "-s":
-                parseSamples = int(arg)
+                samples = int(arg)
             elif opt == "-v":
-                fVerbose = True
+                verbose = True
             elif opt == "-H":
-                fHeader = True
+                print_header = True

-if fUsage or fArgError:
-    print("Usage: "+sys.argv[0]+" [options...]")
+if print_usage or arg_error:
+    print("Usage: " + sys.argv[0] + " [options...]")
     print("Options:")
     print("    -a: Parse all valid samples")
     print("    -h: Be helpful")
     print("    -r: Include ping drop run length stats")
-    print("    -s <num>: Parse <num> data samples, default: "+str(parseSamples))
+    print("    -s <num>: Parse <num> data samples, default: " + str(samples_default))
     print("    -v: Be verbose")
     print("    -H: print CSV header instead of parsing file")
-    sys.exit(1 if fArgError else 0)
+    sys.exit(1 if arg_error else 0)

-if fHeader:
-    header = "datetimestamp_utc,samples,total_ping_drop,count_full_ping_drop,count_obstructed,total_obstructed_ping_drop,count_full_obstructed_ping_drop,count_unscheduled,total_unscheduled_ping_drop,count_full_unscheduled_ping_drop"
-    if fRunLengths:
-        header+= ",init_run_fragment,final_run_fragment,"
-        header += ",".join("run_seconds_" + str(x) for x in range(1, 61)) + ","
-        header += ",".join("run_minutes_" + str(x) for x in range(1, 60))
-        header += ",run_minutes_60_or_greater"
-    print(header)
+fields, rl_fields = starlink_grpc.history_ping_field_names()
+
+if print_header:
+    header = ["datetimestamp_utc"]
+    header.extend(fields)
+    if run_lengths:
+        for field in rl_fields:
+            if field.startswith("run_"):
+                header.extend(field + "_" + str(x) for x in range(1, 61))
+            else:
+                header.append(field)
+    print(",".join(header))
     sys.exit(0)

-with grpc.insecure_channel('192.168.100.1:9200') as channel:
-    stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
-    response = stub.Handle(spacex.api.device.device_pb2.Request(get_history={}))
-
 timestamp = datetime.datetime.utcnow()

-historyData = response.dish_get_history
-
-# 'current' is the count of data samples written to the ring buffer,
-# irrespective of buffer wrap.
-current = int(historyData.current)
-nSamples = len(historyData.pop_ping_drop_rate)
-
-if fVerbose:
-    print("current: " + str(current))
-    print("All samples: " + str(nSamples))
-
-nSamples = min(nSamples,current)
-
-if fVerbose:
-    print("Valid samples: " + str(nSamples))
-
-# This is ring buffer offset, so both index to oldest data sample and
-# index to next data sample after the newest one.
-offset = current % nSamples
-
-tot = 0
-totOne = 0
-totUnsched = 0
-totUnschedD = 0
-totUnschedOne = 0
-totObstruct = 0
-totObstructD = 0
-totObstructOne = 0
-secondRuns = [0] * 60
-minuteRuns = [0] * 60
-runLength = 0
-initRun = None
-
-if fParseAll or nSamples < parseSamples:
-    parseSamples = nSamples
-
-# Parse the most recent parseSamples-sized set of samples. This will
-# iterate samples in order from oldest to newest, although that's not
-# actually required for the current set of stats being computed below.
-if parseSamples <= offset:
-    sampleRange = range(offset - parseSamples, offset)
-else:
-    sampleRange = chain(range(nSamples + offset - parseSamples, nSamples), range(0, offset))
-
-for i in sampleRange:
-    d = historyData.pop_ping_drop_rate[i]
-    tot += d
-    if d >= 1:
-        totOne += d
-        runLength += 1
-    elif runLength > 0:
-        if initRun is None:
-            initRun = runLength
-        else:
-            if runLength <= 60:
-                secondRuns[runLength-1] += runLength
-            else:
-                minuteRuns[min((runLength-1)//60-1, 59)] += runLength
-        runLength = 0
-    elif initRun is None:
-        initRun = 0
-    if not historyData.scheduled[i]:
-        totUnsched += 1
-        totUnschedD += d
-        if d >= 1:
-            totUnschedOne += d
-    if historyData.obstructed[i]:
-        totObstruct += 1
-        totObstructD += d
-        if d >= 1:
-            totObstructOne += d
-
-# If the entire sample set is one big drop run, it will be both initial
-# fragment (continued from prior sample range) and final one (continued
-# to next sample range), but to avoid double-reporting, just call it
-# the initial run.
-if initRun is None:
-    initRun = runLength
-    runLength = 0
-
-if fVerbose:
-    print("Parsed samples: " + str(parseSamples))
-    print("Total ping drop: " + str(tot))
-    print("Count of drop == 1: " + str(totOne))
-    print("Obstructed: " + str(totObstruct))
-    print("Obstructed ping drop: " + str(totObstructD))
-    print("Obstructed drop == 1: " + str(totObstructOne))
-    print("Unscheduled: " + str(totUnsched))
-    print("Unscheduled ping drop: " + str(totUnschedD))
-    print("Unscheduled drop == 1: " + str(totUnschedOne))
-    if fRunLengths:
-        print("Initial drop run fragment: " + str(initRun))
-        print("Final drop run fragment: " + str(runLength))
-        print("Per-second drop runs: " + ", ".join(str(x) for x in secondRuns))
-        print("Per-minute drop runs: " + ", ".join(str(x) for x in minuteRuns))
-else:
-    # NOTE: When changing data output format, also change the -H header printing above.
-    csvData = timestamp.replace(microsecond=0).isoformat() + "," + ",".join(str(x) for x in [parseSamples, tot, totOne, totObstruct, totObstructD, totObstructOne, totUnsched, totUnschedD, totUnschedOne])
-    if fRunLengths:
-        csvData += "," + ",".join(str(x) for x in chain([initRun, runLength], secondRuns, minuteRuns))
-    print(csvData)
+stats, rl_stats = starlink_grpc.history_ping_stats(-1 if parse_all else samples,
+                                                   verbose)
+
+if stats is None or rl_stats is None:
+    # verbose output already happened, so just bail.
+    sys.exit(1)
+
+if verbose:
+    print("Parsed samples: " + str(stats["samples"]))
+    print("Total ping drop: " + str(stats["total_ping_drop"]))
+    print("Count of drop == 1: " + str(stats["count_full_ping_drop"]))
+    print("Obstructed: " + str(stats["count_obstructed"]))
+    print("Obstructed ping drop: " + str(stats["total_obstructed_ping_drop"]))
+    print("Obstructed drop == 1: " + str(stats["count_full_obstructed_ping_drop"]))
+    print("Unscheduled: " + str(stats["count_unscheduled"]))
+    print("Unscheduled ping drop: " + str(stats["total_unscheduled_ping_drop"]))
+    print("Unscheduled drop == 1: " + str(stats["count_full_unscheduled_ping_drop"]))
+    if run_lengths:
+        print("Initial drop run fragment: " + str(rl_stats["init_run_fragment"]))
+        print("Final drop run fragment: " + str(rl_stats["final_run_fragment"]))
+        print("Per-second drop runs: " + ", ".join(str(x) for x in rl_stats["run_seconds"]))
+        print("Per-minute drop runs: " + ", ".join(str(x) for x in rl_stats["run_minutes"]))
+else:
+    csv_data = [timestamp.replace(microsecond=0).isoformat()]
+    csv_data.extend(str(stats[field]) for field in fields)
+    if run_lengths:
+        for field in rl_fields:
+            if field.startswith("run_"):
+                csv_data.extend(str(substat) for substat in rl_stats[field])
+            else:
+                csv_data.append(str(rl_stats[field]))
+    print(",".join(csv_data))
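
One detail of the reworked -H header logic above: the generic loop expands
each list-valued run-length field into 60 numbered columns, so the final
column run_minutes_60_or_greater from the old hard-coded header becomes
run_minutes_60. A standalone sketch of what the loop produces, using the
field names history_ping_field_names() returns in this commit:

# Illustrative reconstruction of the new header expansion, runnable on its own.
rl_fields = ["init_run_fragment", "final_run_fragment", "run_seconds", "run_minutes"]

header = []
for field in rl_fields:
    if field.startswith("run_"):
        # Each run-length list field becomes 60 numbered columns.
        header.extend(field + "_" + str(x) for x in range(1, 61))
    else:
        header.append(field)

print(header[:3])  # ['init_run_fragment', 'final_run_fragment', 'run_seconds_1']
print(header[-1])  # 'run_minutes_60'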

View file

@@ -6,14 +6,14 @@
 # This script pulls the current status once and prints to stdout.
 #
 ######################################################################
+import datetime
 import grpc
 import spacex.api.device.device_pb2
 import spacex.api.device.device_pb2_grpc
-import datetime

-with grpc.insecure_channel('192.168.100.1:9200') as channel:
+with grpc.insecure_channel("192.168.100.1:9200") as channel:
     stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
     response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))

@@ -27,11 +27,25 @@ alert_bits = 0
 for alert in status.alerts.ListFields():
     alert_bits |= (1 if alert[1] else 0) << (alert[0].number - 1)

-print(",".join([timestamp.replace(microsecond=0).isoformat(), status.device_info.id,
-                status.device_info.hardware_version, status.device_info.software_version,
-                spacex.api.device.dish_pb2.DishState.Name(status.state)]) + "," +
-      ",".join(str(x) for x in [status.device_state.uptime_s, status.snr, status.seconds_to_first_nonempty_slot,
-                                status.pop_ping_drop_rate, status.downlink_throughput_bps, status.uplink_throughput_bps,
-                                status.pop_ping_latency_ms, alert_bits, status.obstruction_stats.fraction_obstructed,
-                                status.obstruction_stats.currently_obstructed, status.obstruction_stats.last_24h_obstructed_s]) + "," +
-      ",".join(str(x) for x in status.obstruction_stats.wedge_abs_fraction_obstructed))
+csv_data = [
+    timestamp.replace(microsecond=0).isoformat(),
+    status.device_info.id,
+    status.device_info.hardware_version,
+    status.device_info.software_version,
+    spacex.api.device.dish_pb2.DishState.Name(status.state)
+]
+csv_data.extend(str(x) for x in [
+    status.device_state.uptime_s,
+    status.snr,
+    status.seconds_to_first_nonempty_slot,
+    status.pop_ping_drop_rate,
+    status.downlink_throughput_bps,
+    status.uplink_throughput_bps,
+    status.pop_ping_latency_ms,
+    alert_bits,
+    status.obstruction_stats.fraction_obstructed,
+    status.obstruction_stats.currently_obstructed,
+    status.obstruction_stats.last_24h_obstructed_s
+])
+csv_data.extend(str(x) for x in status.obstruction_stats.wedge_abs_fraction_obstructed)
+print(",".join(csv_data))

View file

@@ -7,6 +7,8 @@
 # the specified InfluxDB database in a loop.
 #
 ######################################################################
+import time
+
 from influxdb import InfluxDBClient
 from influxdb import SeriesHelper

@@ -15,10 +17,8 @@ import grpc
 import spacex.api.device.device_pb2
 import spacex.api.device.device_pb2_grpc
-import time

-fVerbose = True
-sleepTime = 30
+verbose = True
+sleep_time = 30

 class DeviceStatusSeries(SeriesHelper):
     class Meta:

@@ -41,20 +41,20 @@ class DeviceStatusSeries(SeriesHelper):
             "fraction_obstructed"]
         tags = ["id"]

-influxClient = InfluxDBClient(host="localhost", port=8086, username="script-user", password="password", database="dishstats", ssl=False, retries=1, timeout=15)
+influx_client = InfluxDBClient(host="localhost", port=8086, username="script-user", password="password", database="dishstats", ssl=False, retries=1, timeout=15)

 try:
-    dishChannel = None
-    lastId = None
-    fLastFailed = False
+    dish_channel = None
+    last_id = None
+    last_failed = False
     pending = 0
     count = 0
     while True:
         try:
-            if dishChannel is None:
-                dishChannel = grpc.insecure_channel("192.168.100.1:9200")
-            stub = spacex.api.device.device_pb2_grpc.DeviceStub(dishChannel)
+            if dish_channel is None:
+                dish_channel = grpc.insecure_channel("192.168.100.1:9200")
+            stub = spacex.api.device.device_pb2_grpc.DeviceStub(dish_channel)
             response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
             status = response.dish_get_status
             DeviceStatusSeries(

@@ -74,45 +74,45 @@ try:
                 pop_ping_latency_ms=status.pop_ping_latency_ms,
                 currently_obstructed=status.obstruction_stats.currently_obstructed,
                 fraction_obstructed=status.obstruction_stats.fraction_obstructed)
-            lastId = status.device_info.id
-            fLastFailed = False
-        except Exception as e:
-            if not dishChannel is None:
-                dishChannel.close()
-                dishChannel = None
-            if fLastFailed:
-                if not lastId is None:
-                    DeviceStatusSeries(id=lastId, state="DISH_UNREACHABLE")
+            last_id = status.device_info.id
+            last_failed = False
+        except grpc.RpcError:
+            if dish_channel is not None:
+                dish_channel.close()
+                dish_channel = None
+            if last_failed:
+                if last_id is not None:
+                    DeviceStatusSeries(id=last_id, state="DISH_UNREACHABLE")
             else:
                 # Retry once, because the connection may have been lost while
                 # we were sleeping
-                fLastFailed = True
+                last_failed = True
                 continue
         pending = pending + 1
-        if fVerbose:
+        if verbose:
             print("Samples: " + str(pending))
         count = count + 1
         if count > 5:
             try:
-                DeviceStatusSeries.commit(influxClient)
-                if fVerbose:
+                DeviceStatusSeries.commit(influx_client)
+                if verbose:
                     print("Wrote " + str(pending))
                 pending = 0
             except Exception as e:
                 print("Failed to write: " + str(e))
             count = 0
-        if sleepTime > 0:
-            time.sleep(sleepTime)
+        if sleep_time > 0:
+            time.sleep(sleep_time)
         else:
             break
 finally:
     # Flush on error/exit
     try:
-        DeviceStatusSeries.commit(influxClient)
-        if fVerbose:
+        DeviceStatusSeries.commit(influx_client)
+        if verbose:
             print("Wrote " + str(pending))
     except Exception as e:
         print("Failed to write: " + str(e))
-    influxClient.close()
-    if not dishChannel is None:
-        dishChannel.close()
+    influx_client.close()
+    if dish_channel is not None:
+        dish_channel.close()

View file

@@ -14,7 +14,7 @@ import grpc
 import spacex.api.device.device_pb2
 import spacex.api.device.device_pb2_grpc

-with grpc.insecure_channel('192.168.100.1:9200') as channel:
+with grpc.insecure_channel("192.168.100.1:9200") as channel:
     stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
     response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))

@@ -26,25 +26,25 @@ alert_bits = 0
 for alert in status.alerts.ListFields():
     alert_bits |= (1 if alert[1] else 0) << (alert[0].number - 1)

-topicPrefix = "starlink/dish_status/" + status.device_info.id + "/"
-msgs = [(topicPrefix + "hardware_version", status.device_info.hardware_version, 0, False),
-        (topicPrefix + "software_version", status.device_info.software_version, 0, False),
-        (topicPrefix + "state", spacex.api.device.dish_pb2.DishState.Name(status.state), 0, False),
-        (topicPrefix + "uptime", status.device_state.uptime_s, 0, False),
-        (topicPrefix + "snr", status.snr, 0, False),
-        (topicPrefix + "seconds_to_first_nonempty_slot", status.seconds_to_first_nonempty_slot, 0, False),
-        (topicPrefix + "pop_ping_drop_rate", status.pop_ping_drop_rate, 0, False),
-        (topicPrefix + "downlink_throughput_bps", status.downlink_throughput_bps, 0, False),
-        (topicPrefix + "uplink_throughput_bps", status.uplink_throughput_bps, 0, False),
-        (topicPrefix + "pop_ping_latency_ms", status.pop_ping_latency_ms, 0, False),
-        (topicPrefix + "alerts", alert_bits, 0, False),
-        (topicPrefix + "fraction_obstructed", status.obstruction_stats.fraction_obstructed, 0, False),
-        (topicPrefix + "currently_obstructed", status.obstruction_stats.currently_obstructed, 0, False),
+topic_prefix = "starlink/dish_status/" + status.device_info.id + "/"
+msgs = [(topic_prefix + "hardware_version", status.device_info.hardware_version, 0, False),
+        (topic_prefix + "software_version", status.device_info.software_version, 0, False),
+        (topic_prefix + "state", spacex.api.device.dish_pb2.DishState.Name(status.state), 0, False),
+        (topic_prefix + "uptime", status.device_state.uptime_s, 0, False),
+        (topic_prefix + "snr", status.snr, 0, False),
+        (topic_prefix + "seconds_to_first_nonempty_slot", status.seconds_to_first_nonempty_slot, 0, False),
+        (topic_prefix + "pop_ping_drop_rate", status.pop_ping_drop_rate, 0, False),
+        (topic_prefix + "downlink_throughput_bps", status.downlink_throughput_bps, 0, False),
+        (topic_prefix + "uplink_throughput_bps", status.uplink_throughput_bps, 0, False),
+        (topic_prefix + "pop_ping_latency_ms", status.pop_ping_latency_ms, 0, False),
+        (topic_prefix + "alerts", alert_bits, 0, False),
+        (topic_prefix + "fraction_obstructed", status.obstruction_stats.fraction_obstructed, 0, False),
+        (topic_prefix + "currently_obstructed", status.obstruction_stats.currently_obstructed, 0, False),
         # While the field name for this one implies it covers 24 hours, the
         # empirical evidence suggests it only covers 12 hours. It also resets
         # on dish reboot, so may not cover that whole period. Rather than try
         # to convey that complexity in the topic label, just be a bit vague:
-        (topicPrefix + "seconds_obstructed", status.obstruction_stats.last_24h_obstructed_s, 0, False),
-        (topicPrefix + "wedges_fraction_obstructed", ",".join(str(x) for x in status.obstruction_stats.wedge_abs_fraction_obstructed), 0, False)]
+        (topic_prefix + "seconds_obstructed", status.obstruction_stats.last_24h_obstructed_s, 0, False),
+        (topic_prefix + "wedges_fraction_obstructed", ",".join(str(x) for x in status.obstruction_stats.wedge_abs_fraction_obstructed), 0, False)]

 paho.mqtt.publish.multiple(msgs, hostname="localhost", client_id=status.device_info.id)
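
Since every tuple above publishes with retain=False, a subscriber has to be
listening when the script runs. A hypothetical one-shot check (not part of
this commit), using paho-mqtt's blocking subscribe helper:

import paho.mqtt.subscribe

# Blocks until one state message arrives from any dish id via the local broker.
msg = paho.mqtt.subscribe.simple("starlink/dish_status/+/state", hostname="localhost")
print(msg.topic + ": " + msg.payload.decode())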

View file

@@ -13,178 +13,105 @@
 #
 ######################################################################
-import json
 import datetime
 import sys
 import getopt
-from itertools import chain

-fArgError = False
+import starlink_json
+
+arg_error = False

 try:
     opts, args = getopt.getopt(sys.argv[1:], "ahrs:vH")
 except getopt.GetoptError as err:
     print(str(err))
-    fArgError = True
+    arg_error = True

 # Default to 1 hour worth of data samples.
-parseSamples = 3600
-fUsage = False
-fVerbose = False
-fParseAll = False
-fHeader = False
-fRunLengths = False
+samples_default = 3600
+samples = samples_default
+print_usage = False
+verbose = False
+parse_all = False
+print_header = False
+run_lengths = False

-if not fArgError:
+if not arg_error:
     if len(args) > 1:
-        fArgError = True
+        arg_error = True
     else:
         for opt, arg in opts:
             if opt == "-a":
-                fParseAll = True
+                parse_all = True
             elif opt == "-h":
-                fUsage = True
+                print_usage = True
             elif opt == "-r":
-                fRunLengths = True
+                run_lengths = True
             elif opt == "-s":
-                parseSamples = int(arg)
+                samples = int(arg)
             elif opt == "-v":
-                fVerbose = True
+                verbose = True
             elif opt == "-H":
-                fHeader = True
+                print_header = True

-if fUsage or fArgError:
-    print("Usage: "+sys.argv[0]+" [options...] [<file>]")
+if print_usage or arg_error:
+    print("Usage: " + sys.argv[0] + " [options...] [<file>]")
     print("    where <file> is the file to parse, default: stdin")
     print("Options:")
     print("    -a: Parse all valid samples")
     print("    -h: Be helpful")
     print("    -r: Include ping drop run length stats")
-    print("    -s <num>: Parse <num> data samples, default: "+str(parseSamples))
+    print("    -s <num>: Parse <num> data samples, default: " + str(samples_default))
     print("    -v: Be verbose")
     print("    -H: print CSV header instead of parsing file")
-    sys.exit(1 if fArgError else 0)
+    sys.exit(1 if arg_error else 0)

-if fHeader:
-    header = "datetimestamp_utc,samples,total_ping_drop,count_full_ping_drop,count_obstructed,total_obstructed_ping_drop,count_full_obstructed_ping_drop,count_unscheduled,total_unscheduled_ping_drop,count_full_unscheduled_ping_drop"
-    if fRunLengths:
-        header+= ",init_run_fragment,final_run_fragment,"
-        header += ",".join("run_seconds_" + str(x) for x in range(1, 61)) + ","
-        header += ",".join("run_minutes_" + str(x) for x in range(1, 60))
-        header += ",run_minutes_60_or_greater"
-    print(header)
+fields, rl_fields = starlink_json.history_ping_field_names()
+
+if print_header:
+    header = ["datetimestamp_utc"]
+    header.extend(fields)
+    if run_lengths:
+        for field in rl_fields:
+            if field.startswith("run_"):
+                header.extend(field + "_" + str(x) for x in range(1, 61))
+            else:
+                header.append(field)
+    print(",".join(header))
     sys.exit(0)

-# Allow "-" to be specified as file for stdin.
-if len(args) == 0 or args[0] == "-":
-    jsonData = json.load(sys.stdin)
-else:
-    jsonFile = open(args[0])
-    jsonData = json.load(jsonFile)
-    jsonFile.close()
-
 timestamp = datetime.datetime.utcnow()

-historyData = jsonData['dishGetHistory']
-
-# 'current' is the count of data samples written to the ring buffer,
-# irrespective of buffer wrap.
-current = int(historyData['current'])
-nSamples = len(historyData['popPingDropRate'])
-
-if fVerbose:
-    print("current: " + str(current))
-    print("All samples: " + str(nSamples))
-
-nSamples = min(nSamples,current)
-
-if fVerbose:
-    print("Valid samples: " + str(nSamples))
-
-# This is ring buffer offset, so both index to oldest data sample and
-# index to next data sample after the newest one.
-offset = current % nSamples
-
-tot = 0
-totOne = 0
-totUnsched = 0
-totUnschedD = 0
-totUnschedOne = 0
-totObstruct = 0
-totObstructD = 0
-totObstructOne = 0
-secondRuns = [0] * 60
-minuteRuns = [0] * 60
-runLength = 0
-initRun = None
-
-if fParseAll or nSamples < parseSamples:
-    parseSamples = nSamples
-
-# Parse the most recent parseSamples-sized set of samples. This will
-# iterate samples in order from oldest to newest, although that's not
-# actually required for the current set of stats being computed below.
-if parseSamples <= offset:
-    sampleRange = range(offset - parseSamples, offset)
-else:
-    sampleRange = chain(range(nSamples + offset - parseSamples, nSamples), range(0, offset))
-
-for i in sampleRange:
-    d = historyData["popPingDropRate"][i]
-    tot += d
-    if d >= 1:
-        totOne += d
-        runLength += 1
-    elif runLength > 0:
-        if initRun is None:
-            initRun = runLength
-        else:
-            if runLength <= 60:
-                secondRuns[runLength-1] += runLength
-            else:
-                minuteRuns[min((runLength-1)//60-1, 59)] += runLength
-        runLength = 0
-    elif initRun is None:
-        initRun = 0
-    if not historyData["scheduled"][i]:
-        totUnsched += 1
-        totUnschedD += d
-        if d >= 1:
-            totUnschedOne += d
-    if historyData["obstructed"][i]:
-        totObstruct += 1
-        totObstructD += d
-        if d >= 1:
-            totObstructOne += d
-
-# If the entire sample set is one big drop run, it will be both initial
-# fragment (continued from prior sample range) and final one (continued
-# to next sample range), but to avoid double-reporting, just call it
-# the initial run.
-if initRun is None:
-    initRun = runLength
-    runLength = 0
-
-if fVerbose:
-    print("Parsed samples: " + str(parseSamples))
-    print("Total ping drop: " + str(tot))
-    print("Count of drop == 1: " + str(totOne))
-    print("Obstructed: " + str(totObstruct))
-    print("Obstructed ping drop: " + str(totObstructD))
-    print("Obstructed drop == 1: " + str(totObstructOne))
-    print("Unscheduled: " + str(totUnsched))
-    print("Unscheduled ping drop: " + str(totUnschedD))
-    print("Unscheduled drop == 1: " + str(totUnschedOne))
-    if fRunLengths:
-        print("Initial drop run fragment: " + str(initRun))
-        print("Final drop run fragment: " + str(runLength))
-        print("Per-second drop runs: " + ", ".join(str(x) for x in secondRuns))
-        print("Per-minute drop runs: " + ", ".join(str(x) for x in minuteRuns))
-else:
-    # NOTE: When changing data output format, also change the -H header printing above.
-    csvData = timestamp.replace(microsecond=0).isoformat() + "," + ",".join(str(x) for x in [parseSamples, tot, totOne, totObstruct, totObstructD, totObstructOne, totUnsched, totUnschedD, totUnschedOne])
-    if fRunLengths:
-        csvData += "," + ",".join(str(x) for x in chain([initRun, runLength], secondRuns, minuteRuns))
-    print(csvData)
+stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
+                                                   -1 if parse_all else samples,
+                                                   verbose)
+
+if stats is None or rl_stats is None:
+    # verbose output already happened, so just bail.
+    sys.exit(1)
+
+if verbose:
+    print("Parsed samples: " + str(stats["samples"]))
+    print("Total ping drop: " + str(stats["total_ping_drop"]))
+    print("Count of drop == 1: " + str(stats["count_full_ping_drop"]))
+    print("Obstructed: " + str(stats["count_obstructed"]))
+    print("Obstructed ping drop: " + str(stats["total_obstructed_ping_drop"]))
+    print("Obstructed drop == 1: " + str(stats["count_full_obstructed_ping_drop"]))
+    print("Unscheduled: " + str(stats["count_unscheduled"]))
+    print("Unscheduled ping drop: " + str(stats["total_unscheduled_ping_drop"]))
+    print("Unscheduled drop == 1: " + str(stats["count_full_unscheduled_ping_drop"]))
+    if run_lengths:
+        print("Initial drop run fragment: " + str(rl_stats["init_run_fragment"]))
+        print("Final drop run fragment: " + str(rl_stats["final_run_fragment"]))
+        print("Per-second drop runs: " + ", ".join(str(x) for x in rl_stats["run_seconds"]))
+        print("Per-minute drop runs: " + ", ".join(str(x) for x in rl_stats["run_minutes"]))
+else:
+    csv_data = [timestamp.replace(microsecond=0).isoformat()]
+    csv_data.extend(str(stats[field]) for field in fields)
+    if run_lengths:
+        for field in rl_fields:
+            if field.startswith("run_"):
+                csv_data.extend(str(substat) for substat in rl_stats[field])
+            else:
+                csv_data.append(str(rl_stats[field]))
+    print(",".join(csv_data))

starlink_grpc.py (new file)

@@ -0,0 +1,235 @@
"""Helpers for grpc communication with a Starlink user terminal.
This module may eventually contain more expansive parsing logic, but for now
it contains functions to parse the history data for some specific packet loss
statistics.
General ping drop (packet loss) statistics:
This group of statistics characterize the packet loss (labeled "ping drop"
in the field names of the Starlink gRPC service protocol) in various ways.
The sample interval is currently 1 second.
samples: The number of valid samples analyzed.
total_ping_drop: The total amount of time, in sample intervals, that
experienced ping drop.
count_full_ping_drop: The number of samples that experienced 100%
ping drop.
count_obstructed: The number of samples that were marked as
"obstructed", regardless of whether they experienced any ping
drop.
total_obstructed_ping_drop: The total amount of time, in sample
intervals, that experienced ping drop in samples marked as
"obstructed".
count_full_obstructed_ping_drop: The number of samples that were
marked as "obstructed" and that experienced 100% ping drop.
count_unscheduled: The number of samples that were not marked as
"scheduled", regardless of whether they experienced any ping
drop.
total_unscheduled_ping_drop: The total amount of time, in sample
intervals, that experienced ping drop in samples not marked as
"scheduled".
count_full_unscheduled_ping_drop: The number of samples that were
not marked as "scheduled" and that experienced 100% ping drop.
Total packet loss ratio can be computed with total_ping_drop / samples.
Ping drop run length statistics:
This group of statistics characterizes packet loss by how long a
consecutive run of 100% packet loss lasts.
init_run_fragment: The number of consecutive sample periods at the
start of the sample set that experienced 100% ping drop. This
period may be a continuation of a run that started prior to the
sample set, so is not counted in the following stats.
final_run_fragment: The number of consecutive sample periods at the
end of the sample set that experienced 100% ping drop. This
period may continue as a run beyond the end of the sample set, so
is not counted in the following stats.
run_seconds: A 60 element list. Each element records the total amount
of time, in sample intervals, that experienced 100% ping drop in
a consecutive run that lasted for (list index + 1) sample
intervals (seconds). That is, the first element contains time
spent in 1 sample runs, the second element contains time spent in
2 sample runs, etc.
run_minutes: A 60 element list. Each element records the total amount
of time, in sample intervals, that experienced 100% ping drop in
a consecutive run that lasted for more that (list index + 1)
multiples of 60 sample intervals (minutes), but less than or equal
to (list index + 2) multiples of 60 sample intervals. Except for
the last element in the list, which records the total amount of
time in runs of more than 60*60 samples.
No sample should be counted in more than one of the run length stats or
stat elements, so the total of all of them should be equal to
count_full_ping_drop from the general stats.
Samples that experience less than 100% ping drop are not counted in this
group of stats, even if they happen at the beginning or end of a run of
100% ping drop samples. To compute the amount of time that experienced
ping loss in less than a single run of 100% ping drop, use
(total_ping_drop - count_full_ping_drop) from the general stats.
"""
from itertools import chain
import grpc
import spacex.api.device.device_pb2
import spacex.api.device.device_pb2_grpc
def history_ping_field_names():
"""Return the field names of the packet loss stats.
Returns:
A tuple with 2 lists, the first with general stat names and the
second with ping drop run length stat names.
"""
return [
"samples",
"total_ping_drop",
"count_full_ping_drop",
"count_obstructed",
"total_obstructed_ping_drop",
"count_full_obstructed_ping_drop",
"count_unscheduled",
"total_unscheduled_ping_drop",
"count_full_unscheduled_ping_drop"
], [
"init_run_fragment",
"final_run_fragment",
"run_seconds",
"run_minutes"
]
def get_history():
"""Fetch history data and return it in grpc structure format.
Raises:
grpc.RpcError: Communication or service error.
"""
with grpc.insecure_channel("192.168.100.1:9200") as channel:
stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
response = stub.Handle(spacex.api.device.device_pb2.Request(get_history={}))
return response.dish_get_history
def history_ping_stats(parse_samples, verbose=False):
"""Fetch, parse, and compute the packet loss stats.
Args:
parse_samples (int): Number of samples to process, or -1 to parse all
available samples.
verbose (bool): Optionally produce verbose output.
Returns:
On success, a tuple with 2 dicts, the first mapping general stat names
to their values and the second mapping ping drop run length stat names
to their values.
On failure, the tuple (None, None).
"""
try:
history = get_history()
except grpc.RpcError:
if verbose:
# RpcError is too verbose to print the details.
print("Failed getting history")
return None, None
# 'current' is the count of data samples written to the ring buffer,
# irrespective of buffer wrap.
current = int(history.current)
samples = len(history.pop_ping_drop_rate)
if verbose:
print("current counter: " + str(current))
print("All samples: " + str(samples))
samples = min(samples, current)
if verbose:
print("Valid samples: " + str(samples))
# This is ring buffer offset, so both index to oldest data sample and
# index to next data sample after the newest one.
offset = current % samples
tot = 0
count_full_drop = 0
count_unsched = 0
total_unsched_drop = 0
count_full_unsched = 0
count_obstruct = 0
total_obstruct_drop = 0
count_full_obstruct = 0
second_runs = [0] * 60
minute_runs = [0] * 60
run_length = 0
init_run_length = None
if parse_samples < 0 or samples < parse_samples:
parse_samples = samples
# Parse the most recent parse_samples-sized set of samples. This will
# iterate samples in order from oldest to newest.
if parse_samples <= offset:
sample_range = range(offset - parse_samples, offset)
else:
sample_range = chain(range(samples + offset - parse_samples, samples), range(0, offset))
for i in sample_range:
d = history.pop_ping_drop_rate[i]
tot += d
if d >= 1:
count_full_drop += d
run_length += 1
elif run_length > 0:
if init_run_length is None:
init_run_length = run_length
else:
if run_length <= 60:
second_runs[run_length - 1] += run_length
else:
minute_runs[min((run_length - 1)//60 - 1, 59)] += run_length
run_length = 0
elif init_run_length is None:
init_run_length = 0
if not history.scheduled[i]:
count_unsched += 1
total_unsched_drop += d
if d >= 1:
count_full_unsched += d
# scheduled=false and obstructed=true do not ever appear to overlap,
# but in case they do in the future, treat that as just unscheduled
# in order to avoid double-counting it.
elif history.obstructed[i]:
count_obstruct += 1
total_obstruct_drop += d
if d >= 1:
count_full_obstruct += d
# If the entire sample set is one big drop run, it will be both initial
# fragment (continued from prior sample range) and final one (continued
# to next sample range), but to avoid double-reporting, just call it
# the initial run.
if init_run_length is None:
init_run_length = run_length
run_length = 0
return {
"samples": parse_samples,
"total_ping_drop": tot,
"count_full_ping_drop": count_full_drop,
"count_obstructed": count_obstruct,
"total_obstructed_ping_drop": total_obstruct_drop,
"count_full_obstructed_ping_drop": count_full_obstruct,
"count_unscheduled": count_unsched,
"total_unscheduled_ping_drop": total_unsched_drop,
"count_full_unscheduled_ping_drop": count_full_unsched
}, {
"init_run_fragment": init_run_length,
"final_run_fragment": run_length,
"run_seconds": second_runs,
"run_minutes": minute_runs
}
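
To make the run_seconds/run_minutes bucketing concrete: a single
uninterrupted 45-sample run of 100% ping drop adds 45 to run_seconds[44],
while a 90-sample run lands entirely in run_minutes[0] (more than 1 minute,
at most 2 minutes). A minimal consumer sketch for this module, assuming a
dish reachable at the default 192.168.100.1:9200 address (illustrative, not
part of this commit):

import starlink_grpc

fields, rl_fields = starlink_grpc.history_ping_field_names()

# Parse the most recent 15 minutes of samples; -1 would parse all of them.
stats, rl_stats = starlink_grpc.history_ping_stats(900, verbose=False)
if stats is None or rl_stats is None:
    raise SystemExit("Failed getting history")

# Total packet loss ratio, per the module docstring.
print("Loss ratio: " + str(stats["total_ping_drop"] / stats["samples"]))

# A 90-sample full-drop run would show up here:
print("Time in >1 to <=2 minute runs: " + str(rl_stats["run_minutes"][0]))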

starlink_json.py (new file)

@@ -0,0 +1,178 @@
"""Parser for JSON format gRPC output from a Starlink user terminal.
Expects input as from grpcurl get_history request.
Handling output for other request responses may be added in the future, but
the others don't really need as much interpretation as the get_history
response does.
See the starlink_grpc module docstring for descriptions of the stat elements.
"""
import json
import sys
from itertools import chain
def history_ping_field_names():
"""Return the field names of the packet loss stats.
Returns:
A tuple with 2 lists, the first with general stat names and the
second with ping drop run length stat names.
"""
return [
"samples",
"total_ping_drop",
"count_full_ping_drop",
"count_obstructed",
"total_obstructed_ping_drop",
"count_full_obstructed_ping_drop",
"count_unscheduled",
"total_unscheduled_ping_drop",
"count_full_unscheduled_ping_drop"
], [
"init_run_fragment",
"final_run_fragment",
"run_seconds",
"run_minutes"
]
def get_history(filename):
"""Read JSON data and return the raw history in dict format.
Args:
filename (str): Filename from which to read JSON data, or "-" to read
from standard input.
"""
if filename == "-":
json_data = json.load(sys.stdin)
else:
json_file = open(filename)
try:
json_data = json.load(json_file)
finally:
json_file.close()
return json_data["dishGetHistory"]
def history_ping_stats(filename, parse_samples, verbose=False):
"""Fetch, parse, and compute the packet loss stats.
Args:
filename (str): Filename from which to read JSON data, or "-" to read
from standard input.
parse_samples (int): Number of samples to process, or -1 to parse all
available samples.
verbose (bool): Optionally produce verbose output.
Returns:
On success, a tuple with 2 dicts, the first mapping general stat names
to their values and the second mapping ping drop run length stat names
to their values.
On failure, the tuple (None, None).
"""
try:
history = get_history(filename)
except Exception as e:
if verbose:
print("Failed getting history: " + str(e))
return None, None
# "current" is the count of data samples written to the ring buffer,
# irrespective of buffer wrap.
current = int(history["current"])
samples = len(history["popPingDropRate"])
if verbose:
print("current counter: " + str(current))
print("All samples: " + str(samples))
samples = min(samples, current)
if verbose:
print("Valid samples: " + str(samples))
# This is ring buffer offset, so both index to oldest data sample and
# index to next data sample after the newest one.
offset = current % samples
tot = 0
count_full_drop = 0
count_unsched = 0
total_unsched_drop = 0
count_full_unsched = 0
count_obstruct = 0
total_obstruct_drop = 0
count_full_obstruct = 0
second_runs = [0] * 60
minute_runs = [0] * 60
run_length = 0
init_run_length = None
if parse_samples < 0 or samples < parse_samples:
parse_samples = samples
# Parse the most recent parse_samples-sized set of samples. This will
# iterate samples in order from oldest to newest.
if parse_samples <= offset:
sample_range = range(offset - parse_samples, offset)
else:
sample_range = chain(range(samples + offset - parse_samples, samples), range(0, offset))
for i in sample_range:
d = history["popPingDropRate"][i]
tot += d
if d >= 1:
count_full_drop += d
run_length += 1
elif run_length > 0:
if init_run_length is None:
init_run_length = run_length
else:
if run_length <= 60:
second_runs[run_length - 1] += run_length
else:
minute_runs[min((run_length - 1)//60 - 1, 59)] += run_length
run_length = 0
elif init_run_length is None:
init_run_length = 0
if not history["scheduled"][i]:
count_unsched += 1
total_unsched_drop += d
if d >= 1:
count_full_unsched += d
# scheduled=false and obstructed=true do not ever appear to overlap,
# but in case they do in the future, treat that as just unscheduled
# in order to avoid double-counting it.
elif history["obstructed"][i]:
count_obstruct += 1
total_obstruct_drop += d
if d >= 1:
count_full_obstruct += d
# If the entire sample set is one big drop run, it will be both initial
# fragment (continued from prior sample range) and final one (continued
# to next sample range), but to avoid double-reporting, just call it
# the initial run.
if init_run_length is None:
init_run_length = run_length
run_length = 0
return {
"samples": parse_samples,
"total_ping_drop": tot,
"count_full_ping_drop": count_full_drop,
"count_obstructed": count_obstruct,
"total_obstructed_ping_drop": total_obstruct_drop,
"count_full_obstructed_ping_drop": count_full_obstruct,
"count_unscheduled": count_unsched,
"total_unscheduled_ping_drop": total_unsched_drop,
"count_full_unscheduled_ping_drop": count_full_unsched
}, {
"init_run_fragment": init_run_length,
"final_run_fragment": run_length,
"run_seconds": second_runs,
"run_minutes": minute_runs
}
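
A matching sketch for the JSON variant, assuming history.json holds the
grpcurl get_history output this module expects (illustrative, not part of
this commit):

import starlink_json

# "-" would read from standard input instead of a file.
stats, rl_stats = starlink_json.history_ping_stats("history.json", -1, verbose=True)
if stats is None or rl_stats is None:
    raise SystemExit(1)

print("Seconds of full ping drop: " + str(stats["count_full_ping_drop"]))
print("Initial run fragment: " + str(rl_stats["init_run_fragment"]))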