Small change to the interface between parser and calling scripts

Move the "samples" stat out of the ping drop group of stats and into a new general stats group.

This way, it will make more sense if/when additional stat groups are added, since that stat will apply to all of them.
This commit is contained in:
sparky8512 2021-01-08 19:17:34 -08:00
parent 96b7634a3d
commit 0ee39f61fd
4 changed files with 70 additions and 54 deletions

View file

@ -62,11 +62,12 @@ if print_usage or arg_error:
print(" -H: print CSV header instead of parsing file")
sys.exit(1 if arg_error else 0)
fields, rl_fields = starlink_grpc.history_ping_field_names()
g_fields, pd_fields, rl_fields = starlink_grpc.history_ping_field_names()
if print_header:
header = ["datetimestamp_utc"]
header.extend(fields)
header.extend(g_fields)
header.extend(pd_fields)
if run_lengths:
for field in rl_fields:
if field.startswith("run_"):
@ -78,23 +79,23 @@ if print_header:
timestamp = datetime.datetime.utcnow()
stats, rl_stats = starlink_grpc.history_ping_stats(-1 if parse_all else samples,
g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(-1 if parse_all else samples,
verbose)
if stats is None or rl_stats is None:
if g_stats is None:
# verbose output already happened, so just bail.
sys.exit(1)
if verbose:
print("Parsed samples: " + str(stats["samples"]))
print("Total ping drop: " + str(stats["total_ping_drop"]))
print("Count of drop == 1: " + str(stats["count_full_ping_drop"]))
print("Obstructed: " + str(stats["count_obstructed"]))
print("Obstructed ping drop: " + str(stats["total_obstructed_ping_drop"]))
print("Obstructed drop == 1: " + str(stats["count_full_obstructed_ping_drop"]))
print("Unscheduled: " + str(stats["count_unscheduled"]))
print("Unscheduled ping drop: " + str(stats["total_unscheduled_ping_drop"]))
print("Unscheduled drop == 1: " + str(stats["count_full_unscheduled_ping_drop"]))
print("Parsed samples: " + str(g_stats["samples"]))
print("Total ping drop: " + str(pd_stats["total_ping_drop"]))
print("Count of drop == 1: " + str(pd_stats["count_full_ping_drop"]))
print("Obstructed: " + str(pd_stats["count_obstructed"]))
print("Obstructed ping drop: " + str(pd_stats["total_obstructed_ping_drop"]))
print("Obstructed drop == 1: " + str(pd_stats["count_full_obstructed_ping_drop"]))
print("Unscheduled: " + str(pd_stats["count_unscheduled"]))
print("Unscheduled ping drop: " + str(pd_stats["total_unscheduled_ping_drop"]))
print("Unscheduled drop == 1: " + str(pd_stats["count_full_unscheduled_ping_drop"]))
if run_lengths:
print("Initial drop run fragment: " + str(rl_stats["init_run_fragment"]))
print("Final drop run fragment: " + str(rl_stats["final_run_fragment"]))
@ -102,7 +103,8 @@ if verbose:
print("Per-minute drop runs: " + ", ".join(str(x) for x in rl_stats["run_minutes"]))
else:
csv_data = [timestamp.replace(microsecond=0).isoformat()]
csv_data.extend(str(stats[field]) for field in fields)
csv_data.extend(str(g_stats[field]) for field in g_fields)
csv_data.extend(str(pd_stats[field]) for field in pd_fields)
if run_lengths:
for field in rl_fields:
if field.startswith("run_"):

View file

@ -66,11 +66,12 @@ if print_usage or arg_error:
print(" -H: print CSV header instead of parsing file")
sys.exit(1 if arg_error else 0)
fields, rl_fields = starlink_json.history_ping_field_names()
g_fields, pd_fields, rl_fields = starlink_json.history_ping_field_names()
if print_header:
header = ["datetimestamp_utc"]
header.extend(fields)
header.extend(g_fields)
header.extend(pd_fields)
if run_lengths:
for field in rl_fields:
if field.startswith("run_"):
@ -82,24 +83,24 @@ if print_header:
timestamp = datetime.datetime.utcnow()
stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
g_stats, pd_stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
-1 if parse_all else samples,
verbose)
if stats is None or rl_stats is None:
if g_stats is None:
# verbose output already happened, so just bail.
sys.exit(1)
if verbose:
print("Parsed samples: " + str(stats["samples"]))
print("Total ping drop: " + str(stats["total_ping_drop"]))
print("Count of drop == 1: " + str(stats["count_full_ping_drop"]))
print("Obstructed: " + str(stats["count_obstructed"]))
print("Obstructed ping drop: " + str(stats["total_obstructed_ping_drop"]))
print("Obstructed drop == 1: " + str(stats["count_full_obstructed_ping_drop"]))
print("Unscheduled: " + str(stats["count_unscheduled"]))
print("Unscheduled ping drop: " + str(stats["total_unscheduled_ping_drop"]))
print("Unscheduled drop == 1: " + str(stats["count_full_unscheduled_ping_drop"]))
print("Parsed samples: " + str(g_stats["samples"]))
print("Total ping drop: " + str(pd_stats["total_ping_drop"]))
print("Count of drop == 1: " + str(pd_stats["count_full_ping_drop"]))
print("Obstructed: " + str(pd_stats["count_obstructed"]))
print("Obstructed ping drop: " + str(pd_stats["total_obstructed_ping_drop"]))
print("Obstructed drop == 1: " + str(pd_stats["count_full_obstructed_ping_drop"]))
print("Unscheduled: " + str(pd_stats["count_unscheduled"]))
print("Unscheduled ping drop: " + str(pd_stats["total_unscheduled_ping_drop"]))
print("Unscheduled drop == 1: " + str(pd_stats["count_full_unscheduled_ping_drop"]))
if run_lengths:
print("Initial drop run fragment: " + str(rl_stats["init_run_fragment"]))
print("Final drop run fragment: " + str(rl_stats["final_run_fragment"]))
@ -107,7 +108,8 @@ if verbose:
print("Per-minute drop runs: " + ", ".join(str(x) for x in rl_stats["run_minutes"]))
else:
csv_data = [timestamp.replace(microsecond=0).isoformat()]
csv_data.extend(str(stats[field]) for field in fields)
csv_data.extend(str(g_stats[field]) for field in g_fields)
csv_data.extend(str(pd_stats[field]) for field in pd_fields)
if run_lengths:
for field in rl_fields:
if field.startswith("run_"):

View file

@ -4,13 +4,17 @@ This module may eventually contain more expansive parsing logic, but for now
it contains functions to parse the history data for some specific packet loss
statistics.
General ping drop (packet loss) statistics:
This group of statistics characterize the packet loss (labeled "ping drop"
in the field names of the Starlink gRPC service protocol) in various ways.
General statistics:
This group of statistics contains data relevant to all the other groups.
The sample interval is currently 1 second.
samples: The number of valid samples analyzed.
General ping drop (packet loss) statistics:
This group of statistics characterize the packet loss (labeled "ping drop"
in the field names of the Starlink gRPC service protocol) in various ways.
total_ping_drop: The total amount of time, in sample intervals, that
experienced ping drop.
count_full_ping_drop: The number of samples that experienced 100%
@ -62,13 +66,13 @@ Ping drop run length statistics:
No sample should be counted in more than one of the run length stats or
stat elements, so the total of all of them should be equal to
count_full_ping_drop from the general stats.
count_full_ping_drop from the ping drop stats.
Samples that experience less than 100% ping drop are not counted in this
group of stats, even if they happen at the beginning or end of a run of
100% ping drop samples. To compute the amount of time that experienced
ping loss in less than a single run of 100% ping drop, use
(total_ping_drop - count_full_ping_drop) from the general stats.
(total_ping_drop - count_full_ping_drop) from the ping drop stats.
"""
from itertools import chain
@ -82,11 +86,13 @@ def history_ping_field_names():
"""Return the field names of the packet loss stats.
Returns:
A tuple with 2 lists, the first with general stat names and the
second with ping drop run length stat names.
A tuple with 3 lists, the first with general stat names, the second
with ping drop stat names, and the third with ping drop run length
stat names.
"""
return [
"samples",
"samples"
], [
"total_ping_drop",
"count_full_ping_drop",
"count_obstructed",
@ -122,11 +128,12 @@ def history_ping_stats(parse_samples, verbose=False):
verbose (bool): Optionally produce verbose output.
Returns:
On success, a tuple with 2 dicts, the first mapping general stat names
to their values and the second mapping ping drop run length stat names
to their values.
On success, a tuple with 3 dicts, the first mapping general stat names
to their values, the second mapping ping drop stat names to their
values and the third mapping ping drop run length stat names to their
values.
On failure, the tuple (None, None).
On failure, the tuple (None, None, None).
"""
try:
history = get_history()
@ -134,7 +141,7 @@ def history_ping_stats(parse_samples, verbose=False):
if verbose:
# RpcError is too verbose to print the details.
print("Failed getting history")
return None, None
return None, None, None
# 'current' is the count of data samples written to the ring buffer,
# irrespective of buffer wrap.
@ -218,7 +225,8 @@ def history_ping_stats(parse_samples, verbose=False):
run_length = 0
return {
"samples": parse_samples,
"samples": parse_samples
}, {
"total_ping_drop": tot,
"count_full_ping_drop": count_full_drop,
"count_obstructed": count_obstruct,

View file

@ -18,11 +18,13 @@ def history_ping_field_names():
"""Return the field names of the packet loss stats.
Returns:
A tuple with 2 lists, the first with general stat names and the
second with ping drop run length stat names.
A tuple with 3 lists, the first with general stat names, the second
with ping drop stat names, and the third with ping drop run length
stat names.
"""
return [
"samples",
"samples"
], [
"total_ping_drop",
"count_full_ping_drop",
"count_obstructed",
@ -66,18 +68,19 @@ def history_ping_stats(filename, parse_samples, verbose=False):
verbose (bool): Optionally produce verbose output.
Returns:
On success, a tuple with 2 dicts, the first mapping general stat names
to their values and the second mapping ping drop run length stat names
to their values.
On success, a tuple with 3 dicts, the first mapping general stat names
to their values, the second mapping ping drop stat names to their
values and the third mapping ping drop run length stat names to their
values.
On failure, the tuple (None, None).
On failure, the tuple (None, None, None).
"""
try:
history = get_history(filename)
except Exception as e:
if verbose:
print("Failed getting history: " + str(e))
return None, None
return None, None, None
# "current" is the count of data samples written to the ring buffer,
# irrespective of buffer wrap.
@ -161,7 +164,8 @@ def history_ping_stats(filename, parse_samples, verbose=False):
run_length = 0
return {
"samples": parse_samples,
"samples": parse_samples
}, {
"total_ping_drop": tot,
"count_full_ping_drop": count_full_drop,
"count_obstructed": count_obstruct,