Fix remaining pylint and yapf nits

commit 3fafcea882
parent 46f65a6214
Author: sparky8512
Date: 2021-01-15 19:27:10 -08:00

8 changed files with 42 additions and 36 deletions
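The recurring pylint fix below is lazy logging: pass %-style arguments to the logging call rather than concatenating the message up front, so interpolation happens only if a record is actually emitted. A minimal before/after sketch (the error value is a hypothetical stand-in, not taken from these scripts):

import logging

err = ConnectionError("connection refused")  # hypothetical error for illustration

# Before: the message string is built unconditionally, even if the logger drops it.
logging.error("Failure getting ping stats: " + str(err))

# After: interpolation is deferred to the logging machinery.
logging.error("Failure getting ping stats: %s", err)

The scripts' conn_error() helper gains *args for the same reason, mirroring the deferred interpolation with msg % args on its print() path.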

@@ -152,13 +152,13 @@ def main():
     gstate.dish_id = None
     gstate.points = []
 
-    def conn_error(msg):
+    def conn_error(msg, *args):
         # Connection errors that happen in an interval loop are not critical
         # failures, but are interesting enough to print in non-verbose mode.
         if loop_time > 0:
-            print(msg)
+            print(msg % args)
         else:
-            logging.error(msg)
+            logging.error(msg, *args)
 
     def flush_points(client):
         try:
@@ -167,7 +167,7 @@ def main():
                 print("Data points written: " + str(len(gstate.points)))
             gstate.points.clear()
         except Exception as e:
-            conn_error("Failed writing to InfluxDB database: " + str(e))
+            conn_error("Failed writing to InfluxDB database: %s", str(e))
             return 1
 
         return 0
@@ -179,7 +179,7 @@ def main():
                 if verbose:
                     print("Using dish ID: " + gstate.dish_id)
             except starlink_grpc.GrpcError as e:
-                conn_error("Failure getting dish ID: " + str(e))
+                conn_error("Failure getting dish ID: %s", str(e))
                 return 1
 
         timestamp = datetime.datetime.utcnow()
@@ -187,7 +187,7 @@ def main():
         try:
             g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
         except starlink_grpc.GrpcError as e:
-            conn_error("Failure getting ping stats: " + str(e))
+            conn_error("Failure getting ping stats: %s", str(e))
             return 1
 
         all_stats = g_stats.copy()

@@ -124,13 +124,13 @@ def main():
     gstate = GlobalState()
     gstate.dish_id = None
 
-    def conn_error(msg):
+    def conn_error(msg, *args):
         # Connection errors that happen in an interval loop are not critical
         # failures, but are interesting enough to print in non-verbose mode.
         if loop_time > 0:
-            print(msg)
+            print(msg % args)
         else:
-            logging.error(msg)
+            logging.error(msg, *args)
 
     def loop_body():
         if gstate.dish_id is None:
@@ -139,13 +139,13 @@ def main():
                 if verbose:
                     print("Using dish ID: " + gstate.dish_id)
             except starlink_grpc.GrpcError as e:
-                conn_error("Failure getting dish ID: " + str(e))
+                conn_error("Failure getting dish ID: %s", str(e))
                 return 1
 
         try:
             g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
         except starlink_grpc.GrpcError as e:
-            conn_error("Failure getting ping stats: " + str(e))
+            conn_error("Failure getting ping stats: %s", str(e))
            return 1
 
         topic_prefix = "starlink/dish_ping_stats/" + gstate.dish_id + "/"
@@ -163,7 +163,7 @@ def main():
             if verbose:
                 print("Successfully published to MQTT broker")
         except Exception as e:
-            conn_error("Failed publishing to MQTT broker: " + str(e))
+            conn_error("Failed publishing to MQTT broker: %s", str(e))
             return 1
 
         return 0

@@ -98,7 +98,7 @@ def main():
     try:
         g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
     except starlink_grpc.GrpcError as e:
-        logging.error("Failure getting ping stats: " + str(e))
+        logging.error("Failure getting ping stats: %s", str(e))
         return 1
 
     if verbose:

@@ -159,13 +159,13 @@ def main():
         tags = ["id"]
         retention_policy = rp
 
-    def conn_error(msg):
+    def conn_error(msg, *args):
         # Connection errors that happen in an interval loop are not critical
         # failures, but are interesting enough to print in non-verbose mode.
         if loop_time > 0:
-            print(msg)
+            print(msg % args)
         else:
-            logging.error(msg)
+            logging.error(msg, *args)
 
     def flush_pending(client):
         try:
@@ -174,7 +174,7 @@ def main():
                 print("Data points written: " + str(gstate.pending))
             gstate.pending = 0
         except Exception as e:
-            conn_error("Failed writing to InfluxDB database: " + str(e))
+            conn_error("Failed writing to InfluxDB database: %s", str(e))
             return 1
 
         return 0

@@ -107,13 +107,13 @@ def main():
     gstate = GlobalState()
     gstate.dish_id = None
 
-    def conn_error(msg):
+    def conn_error(msg, *args):
         # Connection errors that happen in an interval loop are not critical
         # failures, but are interesting enough to print in non-verbose mode.
         if loop_time > 0:
-            print(msg)
+            print(msg % args)
         else:
-            logging.error(msg)
+            logging.error(msg, *args)
 
     def loop_body():
         try:
@@ -166,7 +166,7 @@ def main():
             if verbose:
                 print("Successfully published to MQTT broker")
         except Exception as e:
-            conn_error("Failed publishing to MQTT broker: " + str(e))
+            conn_error("Failed publishing to MQTT broker: %s", str(e))
             return 1
 
         return 0

@@ -89,7 +89,7 @@ try:
     g_stats, pd_stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
                                                                    samples, verbose)
 except starlink_json.JsonError as e:
-    logging.error("Failure getting ping stats: " + str(e))
+    logging.error("Failure getting ping stats: %s", str(e))
     sys.exit(1)
 
 if verbose:

@@ -108,6 +108,7 @@ def get_status():
         response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
     return response.dish_get_status
 
+
 def get_id():
     """Return the ID from the dish status information.
 
@@ -124,6 +125,7 @@ def get_id():
     except grpc.RpcError as e:
         raise GrpcError(e)
 
+
 def history_ping_field_names():
     """Return the field names of the packet loss stats.
 
@@ -133,7 +135,7 @@ def history_ping_field_names():
     stat names.
     """
     return [
-        "samples"
+        "samples",
     ], [
         "total_ping_drop",
         "count_full_ping_drop",
@@ -142,14 +144,15 @@ def history_ping_field_names():
         "count_full_obstructed_ping_drop",
         "count_unscheduled",
         "total_unscheduled_ping_drop",
-        "count_full_unscheduled_ping_drop"
+        "count_full_unscheduled_ping_drop",
     ], [
         "init_run_fragment",
         "final_run_fragment",
         "run_seconds",
-        "run_minutes"
+        "run_minutes",
     ]
 
+
 def get_history():
     """Fetch history data and return it in grpc structure format.
 
@@ -161,6 +164,7 @@ def get_history():
         response = stub.Handle(spacex.api.device.device_pb2.Request(get_history={}))
     return response.dish_get_history
 
+
 def history_ping_stats(parse_samples, verbose=False):
     """Fetch, parse, and compute the packet loss stats.
 
@@ -239,7 +243,7 @@ def history_ping_stats(parse_samples, verbose=False):
                 if run_length <= 60:
                     second_runs[run_length - 1] += run_length
                 else:
-                    minute_runs[min((run_length - 1)//60 - 1, 59)] += run_length
+                    minute_runs[min((run_length-1) // 60 - 1, 59)] += run_length
             run_length = 0
         elif init_run_length is None:
             init_run_length = 0
@@ -267,7 +271,7 @@ def history_ping_stats(parse_samples, verbose=False):
         run_length = 0
 
     return {
-        "samples": parse_samples
+        "samples": parse_samples,
     }, {
         "total_ping_drop": tot,
         "count_full_ping_drop": count_full_drop,
@@ -276,10 +280,10 @@ def history_ping_stats(parse_samples, verbose=False):
         "count_full_obstructed_ping_drop": count_full_obstruct,
         "count_unscheduled": count_unsched,
         "total_unscheduled_ping_drop": total_unsched_drop,
-        "count_full_unscheduled_ping_drop": count_full_unsched
+        "count_full_unscheduled_ping_drop": count_full_unsched,
     }, {
         "init_run_fragment": init_run_length,
         "final_run_fragment": run_length,
         "run_seconds": second_runs,
-        "run_minutes": minute_runs
+        "run_minutes": minute_runs,
     }

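Besides the lazy-logging change, the starlink_grpc.py diff above shows the two remaining yapf nits: a second blank line between top-level functions, and a trailing comma after the last element of multi-line literals. The trailing comma keeps the closing-bracket line stable, so extending the list later produces a one-line diff. A sketch, with a hypothetical new field:

# Without the trailing comma, appending an item also rewrites the line above:
#     -    "run_minutes"
#     +    "run_minutes",
#     +    "run_hours"
# With it in place, only the new line would appear ("run_hours" is invented here).
fields = [
    "run_seconds",
    "run_minutes",
]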
@@ -28,7 +28,7 @@ def history_ping_field_names():
     stat names.
     """
     return [
-        "samples"
+        "samples",
     ], [
         "total_ping_drop",
         "count_full_ping_drop",
@@ -37,14 +37,15 @@ def history_ping_field_names():
         "count_full_obstructed_ping_drop",
         "count_unscheduled",
         "total_unscheduled_ping_drop",
-        "count_full_unscheduled_ping_drop"
+        "count_full_unscheduled_ping_drop",
     ], [
         "init_run_fragment",
         "final_run_fragment",
         "run_seconds",
-        "run_minutes"
+        "run_minutes",
     ]
 
+
 def get_history(filename):
     """Read JSON data and return the raw history in dict format.
 
@@ -63,6 +64,7 @@ def get_history(filename):
         json_data = json.load(json_file)
     return json_data["dishGetHistory"]
 
+
 def history_ping_stats(filename, parse_samples, verbose=False):
     """Fetch, parse, and compute the packet loss stats.
 
@@ -144,7 +146,7 @@ def history_ping_stats(filename, parse_samples, verbose=False):
                 if run_length <= 60:
                     second_runs[run_length - 1] += run_length
                 else:
-                    minute_runs[min((run_length - 1)//60 - 1, 59)] += run_length
+                    minute_runs[min((run_length-1) // 60 - 1, 59)] += run_length
             run_length = 0
         elif init_run_length is None:
             init_run_length = 0
@@ -172,7 +174,7 @@ def history_ping_stats(filename, parse_samples, verbose=False):
         run_length = 0
 
     return {
-        "samples": parse_samples
+        "samples": parse_samples,
     }, {
         "total_ping_drop": tot,
         "count_full_ping_drop": count_full_drop,
@@ -181,10 +183,10 @@ def history_ping_stats(filename, parse_samples, verbose=False):
         "count_full_obstructed_ping_drop": count_full_obstruct,
         "count_unscheduled": count_unsched,
         "total_unscheduled_ping_drop": total_unsched_drop,
-        "count_full_unscheduled_ping_drop": count_full_unsched
+        "count_full_unscheduled_ping_drop": count_full_unsched,
     }, {
         "init_run_fragment": init_run_length,
         "final_run_fragment": run_length,
         "run_seconds": second_runs,
-        "run_minutes": minute_runs
+        "run_minutes": minute_runs,
    }
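The minute_runs edits in the last two files only adjust whitespace, but the index expression is dense enough to warrant a worked example. A small sketch of how it buckets a run of dropped pings (the helper name is mine, not part of either module):

def minute_bucket(run_length):
    # Mirror of the index expression used for runs longer than 60 seconds.
    return min((run_length - 1) // 60 - 1, 59)

assert minute_bucket(61) == 0     # 61..120 second runs land in the first minute bucket
assert minute_bucket(121) == 1    # 121..180 second runs in the second
assert minute_bucket(7200) == 59  # anything past an hour is capped at the last bucket

Runs of 60 seconds or less are tallied per second in second_runs instead, as the surrounding if/else shows.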