diff --git a/dishHistoryStats.py b/dishHistoryStats.py
index 1a11d78..b9aeb58 100644
--- a/dishHistoryStats.py
+++ b/dishHistoryStats.py
@@ -2,7 +2,7 @@
 ######################################################################
 #
 # Equivalent script to parseJsonHistory.py, except integrating the
-# gRPC calls, instead of relying on separatate invocation of grpcurl.
+# gRPC calls, instead of relying on separate invocation of grpcurl.
 #
 # This script examines the most recent samples from the history data
 # and computes several different metrics related to packet loss. By
@@ -10,181 +10,103 @@
 #
 ######################################################################
 
-import grpc
-
-import spacex.api.device.device_pb2
-import spacex.api.device.device_pb2_grpc
-
 import datetime
 import sys
 import getopt
 
-from itertools import chain
+import starlink_grpc
 
-
-fArgError = False
+arg_error = False
 
 try:
     opts, args = getopt.getopt(sys.argv[1:], "ahrs:vH")
 except getopt.GetoptError as err:
     print(str(err))
-    fArgError = True
+    arg_error = True
 
 # Default to 1 hour worth of data samples.
-parseSamples = 3600
-fUsage = False
-fVerbose = False
-fParseAll = False
-fHeader = False
-fRunLengths = False
+samples_default = 3600
+samples = samples_default
+print_usage = False
+verbose = False
+parse_all = False
+print_header = False
+run_lengths = False
 
-if not fArgError:
+if not arg_error:
     if len(args) > 0:
-        fArgError = True
+        arg_error = True
     else:
         for opt, arg in opts:
             if opt == "-a":
-                fParseAll = True
+                parse_all = True
             elif opt == "-h":
-                fUsage = True
+                print_usage = True
             elif opt == "-r":
-                fRunLengths = True
+                run_lengths = True
             elif opt == "-s":
-                parseSamples = int(arg)
+                samples = int(arg)
            elif opt == "-v":
-                fVerbose = True
+                verbose = True
            elif opt == "-H":
-                fHeader = True
+                print_header = True
 
-if fUsage or fArgError:
-    print("Usage: "+sys.argv[0]+" [options...]")
+if print_usage or arg_error:
+    print("Usage: " + sys.argv[0] + " [options...]")
     print("Options:")
     print("    -a: Parse all valid samples")
     print("    -h: Be helpful")
     print("    -r: Include ping drop run length stats")
-    print("    -s <num>: Parse <num> data samples, default: "+str(parseSamples))
+    print("    -s <num>: Parse <num> data samples, default: " + str(samples_default))
     print("    -v: Be verbose")
     print("    -H: print CSV header instead of parsing file")
-    sys.exit(1 if fArgError else 0)
+    sys.exit(1 if arg_error else 0)
 
-if fHeader:
-    header = "datetimestamp_utc,samples,total_ping_drop,count_full_ping_drop,count_obstructed,total_obstructed_ping_drop,count_full_obstructed_ping_drop,count_unscheduled,total_unscheduled_ping_drop,count_full_unscheduled_ping_drop"
-    if fRunLengths:
-        header+= ",init_run_fragment,final_run_fragment,"
-        header += ",".join("run_seconds_" + str(x) for x in range(1, 61)) + ","
-        header += ",".join("run_minutes_" + str(x) for x in range(1, 60))
-        header += ",run_minutes_60_or_greater"
-    print(header)
+fields, rl_fields = starlink_grpc.history_ping_field_names()
+
+if print_header:
+    header = ["datetimestamp_utc"]
+    header.extend(fields)
+    if run_lengths:
+        for field in rl_fields:
+            if field.startswith("run_"):
+                header.extend(field + "_" + str(x) for x in range(1, 61))
+            else:
+                header.append(field)
+    print(",".join(header))
     sys.exit(0)
 
-with grpc.insecure_channel('192.168.100.1:9200') as channel:
-    stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
-    response = stub.Handle(spacex.api.device.device_pb2.Request(get_history={}))
-
 timestamp = datetime.datetime.utcnow()
 
-historyData = response.dish_get_history
+stats, rl_stats = 
starlink_grpc.history_ping_stats(-1 if parse_all else samples, + verbose) -# 'current' is the count of data samples written to the ring buffer, -# irrespective of buffer wrap. -current = int(historyData.current) -nSamples = len(historyData.pop_ping_drop_rate) +if stats is None or rl_stats is None: + # verbose output already happened, so just bail. + sys.exit(1) -if fVerbose: - print("current: " + str(current)) - print("All samples: " + str(nSamples)) - -nSamples = min(nSamples,current) - -if fVerbose: - print("Valid samples: " + str(nSamples)) - -# This is ring buffer offset, so both index to oldest data sample and -# index to next data sample after the newest one. -offset = current % nSamples - -tot = 0 -totOne = 0 -totUnsched = 0 -totUnschedD = 0 -totUnschedOne = 0 -totObstruct = 0 -totObstructD = 0 -totObstructOne = 0 - -secondRuns = [0] * 60 -minuteRuns = [0] * 60 -runLength = 0 -initRun = None - -if fParseAll or nSamples < parseSamples: - parseSamples = nSamples - -# Parse the most recent parseSamples-sized set of samples. This will -# iterate samples in order from oldest to newest, although that's not -# actually required for the current set of stats being computed below. -if parseSamples <= offset: - sampleRange = range(offset - parseSamples, offset) +if verbose: + print("Parsed samples: " + str(stats["samples"])) + print("Total ping drop: " + str(stats["total_ping_drop"])) + print("Count of drop == 1: " + str(stats["count_full_ping_drop"])) + print("Obstructed: " + str(stats["count_obstructed"])) + print("Obstructed ping drop: " + str(stats["total_obstructed_ping_drop"])) + print("Obstructed drop == 1: " + str(stats["count_full_obstructed_ping_drop"])) + print("Unscheduled: " + str(stats["count_unscheduled"])) + print("Unscheduled ping drop: " + str(stats["total_unscheduled_ping_drop"])) + print("Unscheduled drop == 1: " + str(stats["count_full_unscheduled_ping_drop"])) + if run_lengths: + print("Initial drop run fragment: " + str(rl_stats["init_run_fragment"])) + print("Final drop run fragment: " + str(rl_stats["final_run_fragment"])) + print("Per-second drop runs: " + ", ".join(str(x) for x in rl_stats["run_seconds"])) + print("Per-minute drop runs: " + ", ".join(str(x) for x in rl_stats["run_minutes"])) else: - sampleRange = chain(range(nSamples + offset - parseSamples, nSamples), range(0, offset)) - -for i in sampleRange: - d = historyData.pop_ping_drop_rate[i] - tot += d - if d >= 1: - totOne += d - runLength += 1 - elif runLength > 0: - if initRun is None: - initRun = runLength - else: - if runLength <= 60: - secondRuns[runLength-1] += runLength + csv_data = [timestamp.replace(microsecond=0).isoformat()] + csv_data.extend(str(stats[field]) for field in fields) + if run_lengths: + for field in rl_fields: + if field.startswith("run_"): + csv_data.extend(str(substat) for substat in rl_stats[field]) else: - minuteRuns[min((runLength-1)//60-1, 59)] += runLength - runLength = 0 - elif initRun is None: - initRun = 0 - if not historyData.scheduled[i]: - totUnsched += 1 - totUnschedD += d - if d >= 1: - totUnschedOne += d - # scheduled=false and obstructed=true do not ever appear to overlap, - # but in case they do in the future, treat that as just unscheduled - # in order to avoid double-counting it. 
-    elif historyData.obstructed[i]:
-        totObstruct += 1
-        totObstructD += d
-        if d >= 1:
-            totObstructOne += d
-
-# If the entire sample set is one big drop run, it will be both initial
-# fragment (continued from prior sample range) and final one (continued
-# to next sample range), but to avoid double-reporting, just call it
-# the initial run.
-if initRun is None:
-    initRun = runLength
-    runLength = 0
-
-if fVerbose:
-    print("Parsed samples: " + str(parseSamples))
-    print("Total ping drop: " + str(tot))
-    print("Count of drop == 1: " + str(totOne))
-    print("Obstructed: " + str(totObstruct))
-    print("Obstructed ping drop: " + str(totObstructD))
-    print("Obstructed drop == 1: " + str(totObstructOne))
-    print("Unscheduled: " + str(totUnsched))
-    print("Unscheduled ping drop: " + str(totUnschedD))
-    print("Unscheduled drop == 1: " + str(totUnschedOne))
-    if fRunLengths:
-        print("Initial drop run fragment: " + str(initRun))
-        print("Final drop run fragment: " + str(runLength))
-        print("Per-second drop runs: " + ", ".join(str(x) for x in secondRuns))
-        print("Per-minute drop runs: " + ", ".join(str(x) for x in minuteRuns))
-else:
-    # NOTE: When changing data output format, also change the -H header printing above.
-    csvData = timestamp.replace(microsecond=0).isoformat() + "," + ",".join(str(x) for x in [parseSamples, tot, totOne, totObstruct, totObstructD, totObstructOne, totUnsched, totUnschedD, totUnschedOne])
-    if fRunLengths:
-        csvData += "," + ",".join(str(x) for x in chain([initRun, runLength], secondRuns, minuteRuns))
-    print(csvData)
+                csv_data.append(str(rl_stats[field]))
+    print(",".join(csv_data))
diff --git a/parseJsonHistory.py b/parseJsonHistory.py
index 0817914..9357847 100644
--- a/parseJsonHistory.py
+++ b/parseJsonHistory.py
@@ -13,181 +13,105 @@
 #
 ######################################################################
 
-import json
 import datetime
 import sys
 import getopt
 
-from itertools import chain
+import starlink_json
 
-fArgError = False
+arg_error = False
 
 try:
     opts, args = getopt.getopt(sys.argv[1:], "ahrs:vH")
 except getopt.GetoptError as err:
     print(str(err))
-    fArgError = True
+    arg_error = True
 
 # Default to 1 hour worth of data samples.
-parseSamples = 3600
-fUsage = False
-fVerbose = False
-fParseAll = False
-fHeader = False
-fRunLengths = False
+samples_default = 3600
+samples = samples_default
+print_usage = False
+verbose = False
+parse_all = False
+print_header = False
+run_lengths = False
 
-if not fArgError:
+if not arg_error:
     if len(args) > 1:
-        fArgError = True
+        arg_error = True
     else:
         for opt, arg in opts:
             if opt == "-a":
-                fParseAll = True
+                parse_all = True
             elif opt == "-h":
-                fUsage = True
+                print_usage = True
             elif opt == "-r":
-                fRunLengths = True
+                run_lengths = True
             elif opt == "-s":
-                parseSamples = int(arg)
+                samples = int(arg)
            elif opt == "-v":
-                fVerbose = True
+                verbose = True
            elif opt == "-H":
-                fHeader = True
+                print_header = True
 
-if fUsage or fArgError:
-    print("Usage: "+sys.argv[0]+" [options...] [<file>]")
+if print_usage or arg_error:
+    print("Usage: " + sys.argv[0] + " [options...] [<file>]")
     print("    where <file> is the file to parse, default: stdin")
     print("Options:")
     print("    -a: Parse all valid samples")
     print("    -h: Be helpful")
     print("    -r: Include ping drop run length stats")
-    print("    -s <num>: Parse <num> data samples, default: "+str(parseSamples))
+    print("    -s <num>: Parse <num> data samples, default: " + str(samples_default))
     print("    -v: Be verbose")
     print("    -H: print CSV header instead of parsing file")
-    sys.exit(1 if fArgError else 0)
+    sys.exit(1 if arg_error else 0)
 
-if fHeader:
-    header = "datetimestamp_utc,samples,total_ping_drop,count_full_ping_drop,count_obstructed,total_obstructed_ping_drop,count_full_obstructed_ping_drop,count_unscheduled,total_unscheduled_ping_drop,count_full_unscheduled_ping_drop"
-    if fRunLengths:
-        header+= ",init_run_fragment,final_run_fragment,"
-        header += ",".join("run_seconds_" + str(x) for x in range(1, 61)) + ","
-        header += ",".join("run_minutes_" + str(x) for x in range(1, 60))
-        header += ",run_minutes_60_or_greater"
-    print(header)
+fields, rl_fields = starlink_json.history_ping_field_names()
+
+if print_header:
+    header = ["datetimestamp_utc"]
+    header.extend(fields)
+    if run_lengths:
+        for field in rl_fields:
+            if field.startswith("run_"):
+                header.extend(field + "_" + str(x) for x in range(1, 61))
+            else:
+                header.append(field)
+    print(",".join(header))
     sys.exit(0)
 
-# Allow "-" to be specified as file for stdin.
-if len(args) == 0 or args[0] == "-":
-    jsonData = json.load(sys.stdin)
-else:
-    jsonFile = open(args[0])
-    jsonData = json.load(jsonFile)
-    jsonFile.close()
-
 timestamp = datetime.datetime.utcnow()
 
-historyData = jsonData['dishGetHistory']
+stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
+                                                   -1 if parse_all else samples,
+                                                   verbose)
 
-# 'current' is the count of data samples written to the ring buffer,
-# irrespective of buffer wrap.
-current = int(historyData['current'])
-nSamples = len(historyData['popPingDropRate'])
+if stats is None or rl_stats is None:
+    # verbose output already happened, so just bail.
+    sys.exit(1)
 
-if fVerbose:
-    print("current: " + str(current))
-    print("All samples: " + str(nSamples))
-
-nSamples = min(nSamples,current)
-
-if fVerbose:
-    print("Valid samples: " + str(nSamples))
-
-# This is ring buffer offset, so both index to oldest data sample and
-# index to next data sample after the newest one.
-offset = current % nSamples
-
-tot = 0
-totOne = 0
-totUnsched = 0
-totUnschedD = 0
-totUnschedOne = 0
-totObstruct = 0
-totObstructD = 0
-totObstructOne = 0
-
-secondRuns = [0] * 60
-minuteRuns = [0] * 60
-runLength = 0
-initRun = None
-
-if fParseAll or nSamples < parseSamples:
-    parseSamples = nSamples
-
-# Parse the most recent parseSamples-sized set of samples. This will
-# iterate samples in order from oldest to newest, although that's not
-# actually required for the current set of stats being computed below.
-if parseSamples <= offset: - sampleRange = range(offset - parseSamples, offset) +if verbose: + print("Parsed samples: " + str(stats["samples"])) + print("Total ping drop: " + str(stats["total_ping_drop"])) + print("Count of drop == 1: " + str(stats["count_full_ping_drop"])) + print("Obstructed: " + str(stats["count_obstructed"])) + print("Obstructed ping drop: " + str(stats["total_obstructed_ping_drop"])) + print("Obstructed drop == 1: " + str(stats["count_full_obstructed_ping_drop"])) + print("Unscheduled: " + str(stats["count_unscheduled"])) + print("Unscheduled ping drop: " + str(stats["total_unscheduled_ping_drop"])) + print("Unscheduled drop == 1: " + str(stats["count_full_unscheduled_ping_drop"])) + if run_lengths: + print("Initial drop run fragment: " + str(rl_stats["init_run_fragment"])) + print("Final drop run fragment: " + str(rl_stats["final_run_fragment"])) + print("Per-second drop runs: " + ", ".join(str(x) for x in rl_stats["run_seconds"])) + print("Per-minute drop runs: " + ", ".join(str(x) for x in rl_stats["run_minutes"])) else: - sampleRange = chain(range(nSamples + offset - parseSamples, nSamples), range(0, offset)) - -for i in sampleRange: - d = historyData["popPingDropRate"][i] - tot += d - if d >= 1: - totOne += d - runLength += 1 - elif runLength > 0: - if initRun is None: - initRun = runLength - else: - if runLength <= 60: - secondRuns[runLength-1] += runLength + csv_data = [timestamp.replace(microsecond=0).isoformat()] + csv_data.extend(str(stats[field]) for field in fields) + if run_lengths: + for field in rl_fields: + if field.startswith("run_"): + csv_data.extend(str(substat) for substat in rl_stats[field]) else: - minuteRuns[min((runLength-1)//60-1, 59)] += runLength - runLength = 0 - elif initRun is None: - initRun = 0 - if not historyData["scheduled"][i]: - totUnsched += 1 - totUnschedD += d - if d >= 1: - totUnschedOne += d - # scheduled=false and obstructed=true do not ever appear to overlap, - # but in case they do in the future, treat that as just unscheduled - # in order to avoid double-counting it. - elif historyData["obstructed"][i]: - totObstruct += 1 - totObstructD += d - if d >= 1: - totObstructOne += d - -# If the entire sample set is one big drop run, it will be both initial -# fragment (continued from prior sample range) and final one (continued -# to next sample range), but to avoid double-reporting, just call it -# the initial run. -if initRun is None: - initRun = runLength - runLength = 0 - -if fVerbose: - print("Parsed samples: " + str(parseSamples)) - print("Total ping drop: " + str(tot)) - print("Count of drop == 1: " + str(totOne)) - print("Obstructed: " + str(totObstruct)) - print("Obstructed ping drop: " + str(totObstructD)) - print("Obstructed drop == 1: " + str(totObstructOne)) - print("Unscheduled: " + str(totUnsched)) - print("Unscheduled ping drop: " + str(totUnschedD)) - print("Unscheduled drop == 1: " + str(totUnschedOne)) - if fRunLengths: - print("Initial drop run fragment: " + str(initRun)) - print("Final drop run fragment: " + str(runLength)) - print("Per-second drop runs: " + ", ".join(str(x) for x in secondRuns)) - print("Per-minute drop runs: " + ", ".join(str(x) for x in minuteRuns)) -else: - # NOTE: When changing data output format, also change the -H header printing above. 
-    csvData = timestamp.replace(microsecond=0).isoformat() + "," + ",".join(str(x) for x in [parseSamples, tot, totOne, totObstruct, totObstructD, totObstructOne, totUnsched, totUnschedD, totUnschedOne])
-    if fRunLengths:
-        csvData += "," + ",".join(str(x) for x in chain([initRun, runLength], secondRuns, minuteRuns))
-    print(csvData)
+                csv_data.append(str(rl_stats[field]))
+    print(",".join(csv_data))
diff --git a/starlink_grpc.py b/starlink_grpc.py
new file mode 100644
index 0000000..3401863
--- /dev/null
+++ b/starlink_grpc.py
@@ -0,0 +1,235 @@
+"""Helpers for grpc communication with a Starlink user terminal.
+
+This module may eventually contain more expansive parsing logic, but for now
+it contains functions to parse the history data for some specific packet loss
+statistics.
+
+General ping drop (packet loss) statistics:
+    This group of statistics characterizes the packet loss (labeled "ping
+    drop" in the field names of the Starlink gRPC service protocol) in
+    various ways.
+
+    The sample interval is currently 1 second.
+
+    samples: The number of valid samples analyzed.
+    total_ping_drop: The total amount of time, in sample intervals, that
+        experienced ping drop.
+    count_full_ping_drop: The number of samples that experienced 100%
+        ping drop.
+    count_obstructed: The number of samples that were marked as
+        "obstructed", regardless of whether they experienced any ping
+        drop.
+    total_obstructed_ping_drop: The total amount of time, in sample
+        intervals, that experienced ping drop in samples marked as
+        "obstructed".
+    count_full_obstructed_ping_drop: The number of samples that were
+        marked as "obstructed" and that experienced 100% ping drop.
+    count_unscheduled: The number of samples that were not marked as
+        "scheduled", regardless of whether they experienced any ping
+        drop.
+    total_unscheduled_ping_drop: The total amount of time, in sample
+        intervals, that experienced ping drop in samples not marked as
+        "scheduled".
+    count_full_unscheduled_ping_drop: The number of samples that were
+        not marked as "scheduled" and that experienced 100% ping drop.
+
+    Total packet loss ratio can be computed with total_ping_drop / samples.
+
+Ping drop run length statistics:
+    This group of statistics characterizes packet loss by how long a
+    consecutive run of 100% packet loss lasts.
+
+    init_run_fragment: The number of consecutive sample periods at the
+        start of the sample set that experienced 100% ping drop. This
+        period may be a continuation of a run that started prior to the
+        sample set, so is not counted in the following stats.
+    final_run_fragment: The number of consecutive sample periods at the
+        end of the sample set that experienced 100% ping drop. This
+        period may continue as a run beyond the end of the sample set, so
+        is not counted in the following stats.
+    run_seconds: A 60 element list. Each element records the total amount
+        of time, in sample intervals, that experienced 100% ping drop in
+        a consecutive run that lasted for (list index + 1) sample
+        intervals (seconds). That is, the first element contains time
+        spent in 1 sample runs, the second element contains time spent in
+        2 sample runs, etc.
+    run_minutes: A 60 element list. Each element records the total amount
+        of time, in sample intervals, that experienced 100% ping drop in
+        a consecutive run that lasted for more than (list index + 1)
+        multiples of 60 sample intervals (minutes), but less than or equal
+        to (list index + 2) multiples of 60 sample intervals. 
Except for + the last element in the list, which records the total amount of + time in runs of more than 60*60 samples. + + No sample should be counted in more than one of the run length stats or + stat elements, so the total of all of them should be equal to + count_full_ping_drop from the general stats. + + Samples that experience less than 100% ping drop are not counted in this + group of stats, even if they happen at the beginning or end of a run of + 100% ping drop samples. To compute the amount of time that experienced + ping loss in less than a single run of 100% ping drop, use + (total_ping_drop - count_full_ping_drop) from the general stats. +""" + +from itertools import chain + +import grpc + +import spacex.api.device.device_pb2 +import spacex.api.device.device_pb2_grpc + +def history_ping_field_names(): + """Return the field names of the packet loss stats. + + Returns: + A tuple with 2 lists, the first with general stat names and the + second with ping drop run length stat names. + """ + return [ + "samples", + "total_ping_drop", + "count_full_ping_drop", + "count_obstructed", + "total_obstructed_ping_drop", + "count_full_obstructed_ping_drop", + "count_unscheduled", + "total_unscheduled_ping_drop", + "count_full_unscheduled_ping_drop" + ], [ + "init_run_fragment", + "final_run_fragment", + "run_seconds", + "run_minutes" + ] + +def get_history(): + """Fetch history data and return it in grpc structure format. + + Raises: + grpc.RpcError: Communication or service error. + """ + with grpc.insecure_channel("192.168.100.1:9200") as channel: + stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel) + response = stub.Handle(spacex.api.device.device_pb2.Request(get_history={})) + return response.dish_get_history + +def history_ping_stats(parse_samples, verbose=False): + """Fetch, parse, and compute the packet loss stats. + + Args: + parse_samples (int): Number of samples to process, or -1 to parse all + available samples. + verbose (bool): Optionally produce verbose output. + + Returns: + On success, a tuple with 2 dicts, the first mapping general stat names + to their values and the second mapping ping drop run length stat names + to their values. + + On failure, the tuple (None, None). + """ + try: + history = get_history() + except grpc.RpcError: + if verbose: + # RpcError is too verbose to print the details. + print("Failed getting history") + return None, None + + # 'current' is the count of data samples written to the ring buffer, + # irrespective of buffer wrap. + current = int(history.current) + samples = len(history.pop_ping_drop_rate) + + if verbose: + print("current counter: " + str(current)) + print("All samples: " + str(samples)) + + samples = min(samples, current) + + if verbose: + print("Valid samples: " + str(samples)) + + # This is ring buffer offset, so both index to oldest data sample and + # index to next data sample after the newest one. + offset = current % samples + + tot = 0 + count_full_drop = 0 + count_unsched = 0 + total_unsched_drop = 0 + count_full_unsched = 0 + count_obstruct = 0 + total_obstruct_drop = 0 + count_full_obstruct = 0 + + second_runs = [0] * 60 + minute_runs = [0] * 60 + run_length = 0 + init_run_length = None + + if parse_samples < 0 or samples < parse_samples: + parse_samples = samples + + # Parse the most recent parse_samples-sized set of samples. This will + # iterate samples in order from oldest to newest. 
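+    # Worked example of the wrap handling below: with a 3600-sample buffer
+    # and current == 3700, offset is 100; parsing all 3600 samples iterates
+    # indices 100 through 3599 (the older data) and then wraps around to
+    # 0 through 99 (the newest data).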
+ if parse_samples <= offset: + sample_range = range(offset - parse_samples, offset) + else: + sample_range = chain(range(samples + offset - parse_samples, samples), range(0, offset)) + + for i in sample_range: + d = history.pop_ping_drop_rate[i] + tot += d + if d >= 1: + count_full_drop += d + run_length += 1 + elif run_length > 0: + if init_run_length is None: + init_run_length = run_length + else: + if run_length <= 60: + second_runs[run_length - 1] += run_length + else: + minute_runs[min((run_length - 1)//60 - 1, 59)] += run_length + run_length = 0 + elif init_run_length is None: + init_run_length = 0 + if not history.scheduled[i]: + count_unsched += 1 + total_unsched_drop += d + if d >= 1: + count_full_unsched += d + # scheduled=false and obstructed=true do not ever appear to overlap, + # but in case they do in the future, treat that as just unscheduled + # in order to avoid double-counting it. + elif history.obstructed[i]: + count_obstruct += 1 + total_obstruct_drop += d + if d >= 1: + count_full_obstruct += d + + # If the entire sample set is one big drop run, it will be both initial + # fragment (continued from prior sample range) and final one (continued + # to next sample range), but to avoid double-reporting, just call it + # the initial run. + if init_run_length is None: + init_run_length = run_length + run_length = 0 + + return { + "samples": parse_samples, + "total_ping_drop": tot, + "count_full_ping_drop": count_full_drop, + "count_obstructed": count_obstruct, + "total_obstructed_ping_drop": total_obstruct_drop, + "count_full_obstructed_ping_drop": count_full_obstruct, + "count_unscheduled": count_unsched, + "total_unscheduled_ping_drop": total_unsched_drop, + "count_full_unscheduled_ping_drop": count_full_unsched + }, { + "init_run_fragment": init_run_length, + "final_run_fragment": run_length, + "run_seconds": second_runs, + "run_minutes": minute_runs + } diff --git a/starlink_json.py b/starlink_json.py new file mode 100644 index 0000000..ca70547 --- /dev/null +++ b/starlink_json.py @@ -0,0 +1,178 @@ +"""Parser for JSON format gRPC output from a Starlink user terminal. + +Expects input as from grpcurl get_history request. + +Handling output for other request responses may be added in the future, but +the others don't really need as much interpretation as the get_history +response does. + +See the starlink_grpc module docstring for descriptions of the stat elements. +""" + +import json +import sys + +from itertools import chain + +def history_ping_field_names(): + """Return the field names of the packet loss stats. + + Returns: + A tuple with 2 lists, the first with general stat names and the + second with ping drop run length stat names. + """ + return [ + "samples", + "total_ping_drop", + "count_full_ping_drop", + "count_obstructed", + "total_obstructed_ping_drop", + "count_full_obstructed_ping_drop", + "count_unscheduled", + "total_unscheduled_ping_drop", + "count_full_unscheduled_ping_drop" + ], [ + "init_run_fragment", + "final_run_fragment", + "run_seconds", + "run_minutes" + ] + +def get_history(filename): + """Read JSON data and return the raw history in dict format. + + Args: + filename (str): Filename from which to read JSON data, or "-" to read + from standard input. 
+ """ + if filename == "-": + json_data = json.load(sys.stdin) + else: + json_file = open(filename) + try: + json_data = json.load(json_file) + finally: + json_file.close() + return json_data["dishGetHistory"] + +def history_ping_stats(filename, parse_samples, verbose=False): + """Fetch, parse, and compute the packet loss stats. + + Args: + filename (str): Filename from which to read JSON data, or "-" to read + from standard input. + parse_samples (int): Number of samples to process, or -1 to parse all + available samples. + verbose (bool): Optionally produce verbose output. + + Returns: + On success, a tuple with 2 dicts, the first mapping general stat names + to their values and the second mapping ping drop run length stat names + to their values. + + On failure, the tuple (None, None). + """ + try: + history = get_history(filename) + except Exception as e: + if verbose: + print("Failed getting history: " + str(e)) + return None, None + + # "current" is the count of data samples written to the ring buffer, + # irrespective of buffer wrap. + current = int(history["current"]) + samples = len(history["popPingDropRate"]) + + if verbose: + print("current counter: " + str(current)) + print("All samples: " + str(samples)) + + samples = min(samples, current) + + if verbose: + print("Valid samples: " + str(samples)) + + # This is ring buffer offset, so both index to oldest data sample and + # index to next data sample after the newest one. + offset = current % samples + + tot = 0 + count_full_drop = 0 + count_unsched = 0 + total_unsched_drop = 0 + count_full_unsched = 0 + count_obstruct = 0 + total_obstruct_drop = 0 + count_full_obstruct = 0 + + second_runs = [0] * 60 + minute_runs = [0] * 60 + run_length = 0 + init_run_length = None + + if parse_samples < 0 or samples < parse_samples: + parse_samples = samples + + # Parse the most recent parse_samples-sized set of samples. This will + # iterate samples in order from oldest to newest. + if parse_samples <= offset: + sample_range = range(offset - parse_samples, offset) + else: + sample_range = chain(range(samples + offset - parse_samples, samples), range(0, offset)) + + for i in sample_range: + d = history["popPingDropRate"][i] + tot += d + if d >= 1: + count_full_drop += d + run_length += 1 + elif run_length > 0: + if init_run_length is None: + init_run_length = run_length + else: + if run_length <= 60: + second_runs[run_length - 1] += run_length + else: + minute_runs[min((run_length - 1)//60 - 1, 59)] += run_length + run_length = 0 + elif init_run_length is None: + init_run_length = 0 + if not history["scheduled"][i]: + count_unsched += 1 + total_unsched_drop += d + if d >= 1: + count_full_unsched += d + # scheduled=false and obstructed=true do not ever appear to overlap, + # but in case they do in the future, treat that as just unscheduled + # in order to avoid double-counting it. + elif history["obstructed"][i]: + count_obstruct += 1 + total_obstruct_drop += d + if d >= 1: + count_full_obstruct += d + + # If the entire sample set is one big drop run, it will be both initial + # fragment (continued from prior sample range) and final one (continued + # to next sample range), but to avoid double-reporting, just call it + # the initial run. 
+ if init_run_length is None: + init_run_length = run_length + run_length = 0 + + return { + "samples": parse_samples, + "total_ping_drop": tot, + "count_full_ping_drop": count_full_drop, + "count_obstructed": count_obstruct, + "total_obstructed_ping_drop": total_obstruct_drop, + "count_full_obstructed_ping_drop": count_full_obstruct, + "count_unscheduled": count_unsched, + "total_unscheduled_ping_drop": total_unsched_drop, + "count_full_unscheduled_ping_drop": count_full_unsched + }, { + "init_run_fragment": init_run_length, + "final_run_fragment": run_length, + "run_seconds": second_runs, + "run_minutes": minute_runs + }
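
Usage note, not part of the patch: a minimal sketch of how the refactored
module API is meant to be consumed, mirroring what the reworked
dishHistoryStats.py above does. It assumes a dish reachable at the default
192.168.100.1:9200 address hard-coded in starlink_grpc.get_history().

    import starlink_grpc

    fields, rl_fields = starlink_grpc.history_ping_field_names()

    # Parse the most recent hour of samples (3600, at 1 sample per second).
    stats, rl_stats = starlink_grpc.history_ping_stats(3600)
    if stats is None or rl_stats is None:
        print("Failed getting history")
    else:
        for field in fields:
            print(field + ":", stats[field])
        for field in rl_fields:
            print(field + ":", rl_stats[field])
        # Per the module docstring, the overall packet loss ratio is
        # total_ping_drop / samples.
        print("loss_ratio:", stats["total_ping_drop"] / stats["samples"])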