Merge pull request #5 from sparky8512/main

Bring Current
Leigh Phillips 2021-01-12 21:30:05 -08:00 committed by GitHub
commit e999fbf637
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 142 additions and 75 deletions

View file

@@ -14,6 +14,7 @@ import datetime
 import os
 import sys
 import getopt
+import logging
 import warnings
 
 from influxdb import InfluxDBClient
@@ -128,19 +129,20 @@ if print_usage or arg_error:
     print(" -U <name>: Set username for authentication")
     sys.exit(1 if arg_error else 0)
 
-dish_id = starlink_grpc.get_id()
-
-if dish_id is None:
-    if verbose:
-        print("Unable to connect to Starlink user terminal")
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
+try:
+    dish_id = starlink_grpc.get_id()
+except starlink_grpc.GrpcError as e:
+    logging.error("Failure getting dish ID: " + str(e))
     sys.exit(1)
 
 timestamp = datetime.datetime.utcnow()
 
-g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
-
-if g_stats is None:
-    # verbose output already happened, so just bail.
+try:
+    g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
+except starlink_grpc.GrpcError as e:
+    logging.error("Failure getting ping stats: " + str(e))
     sys.exit(1)
 
 all_stats = g_stats.copy()
@@ -169,7 +171,7 @@ try:
     influx_client.write_points(points, retention_policy=rp)
     rc = 0
 except Exception as e:
-    print("Failed writing to InfluxDB database: " + str(e))
+    logging.error("Failed writing to InfluxDB database: " + str(e))
     rc = 1
 finally:
     influx_client.close()
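For reference, the caller-side pattern this commit standardizes on, shown as a minimal standalone sketch (it assumes starlink_grpc is importable; whether a dish is reachable only changes which branch runs):

import logging
import sys

import starlink_grpc

logging.basicConfig(format="%(levelname)s: %(message)s")

try:
    dish_id = starlink_grpc.get_id()
except starlink_grpc.GrpcError as e:
    # get_id() now raises GrpcError instead of returning None on failure.
    logging.error("Failure getting dish ID: " + str(e))
    sys.exit(1)

print("Dish ID: " + dish_id)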

View file

@@ -12,6 +12,7 @@
 import sys
 import getopt
+import logging
 
 try:
     import ssl
@@ -97,17 +98,18 @@ if print_usage or arg_error:
     print(" -U: Set username for authentication")
     sys.exit(1 if arg_error else 0)
 
-dish_id = starlink_grpc.get_id()
-
-if dish_id is None:
-    if verbose:
-        print("Unable to connect to Starlink user terminal")
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
+try:
+    dish_id = starlink_grpc.get_id()
+except starlink_grpc.GrpcError as e:
+    logging.error("Failure getting dish ID: " + str(e))
     sys.exit(1)
 
-g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
-
-if g_stats is None:
-    # verbose output already happened, so just bail.
+try:
+    g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
+except starlink_grpc.GrpcError as e:
+    logging.error("Failure getting ping stats: " + str(e))
     sys.exit(1)
 
 topic_prefix = "starlink/dish_ping_stats/" + dish_id + "/"
@@ -128,5 +130,5 @@ if username is not None:
 try:
     paho.mqtt.publish.multiple(msgs, client_id=dish_id, **mqargs)
 except Exception as e:
-    print("Failed publishing to MQTT broker: " + str(e))
+    logging.error("Failed publishing to MQTT broker: " + str(e))
     sys.exit(1)
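The MQTT publish call keeps the same shape and only switches the error report to logging. A hedged usage sketch of that call in isolation; the topic, payload, broker hostname, and client ID below are made up for the example:

import logging
import sys

import paho.mqtt.publish

logging.basicConfig(format="%(levelname)s: %(message)s")

# One message per published stat, in the "starlink/dish_ping_stats/<id>/<field>" shape.
msgs = [{"topic": "starlink/dish_ping_stats/example-id/samples", "payload": 3600}]

try:
    paho.mqtt.publish.multiple(msgs, client_id="example-id", hostname="localhost")
except Exception as e:
    logging.error("Failed publishing to MQTT broker: " + str(e))
    sys.exit(1)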

View file

@@ -13,6 +13,7 @@
 import datetime
 import sys
 import getopt
+import logging
 
 import starlink_grpc
@@ -61,6 +62,8 @@ if print_usage or arg_error:
     print(" -H: print CSV header instead of parsing file")
     sys.exit(1 if arg_error else 0)
 
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
 g_fields, pd_fields, rl_fields = starlink_grpc.history_ping_field_names()
 
 if print_header:
@@ -78,10 +81,10 @@ if print_header:
 timestamp = datetime.datetime.utcnow()
 
-g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
-
-if g_stats is None:
-    # verbose output already happened, so just bail.
+try:
+    g_stats, pd_stats, rl_stats = starlink_grpc.history_ping_stats(samples, verbose)
+except starlink_grpc.GrpcError as e:
+    logging.error("Failure getting ping stats: " + str(e))
     sys.exit(1)
 
 if verbose:

View file

@@ -10,6 +10,7 @@
 import datetime
 import sys
 import getopt
+import logging
 
 import grpc
@@ -44,6 +45,8 @@ if print_usage or arg_error:
     print(" -H: print CSV header instead of parsing file")
     sys.exit(1 if arg_error else 0)
 
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
 if print_header:
     header = [
         "datetimestamp_utc",
@@ -71,7 +74,7 @@ try:
         stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
         response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
 except grpc.RpcError:
-    print("Failed getting status info")
+    logging.error("Failed getting status info")
     sys.exit(1)
 
 timestamp = datetime.datetime.utcnow()
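This script still talks to the dish directly over gRPC; only the failure report changed. A sketch of that status call in isolation, assuming the generated spacex.api.device protobuf modules are importable and using the standard dish address already shown in the script:

import logging
import sys

import grpc

import spacex.api.device.device_pb2
import spacex.api.device.device_pb2_grpc

logging.basicConfig(format="%(levelname)s: %(message)s")

try:
    with grpc.insecure_channel("192.168.100.1:9200") as channel:
        stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
        response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
except grpc.RpcError:
    logging.error("Failed getting status info")
    sys.exit(1)

print(response.dish_get_status.device_info.id)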

View file

@@ -12,8 +12,9 @@ import time
 import os
 import sys
 import getopt
+import logging
 import warnings
 
 from influxdb import InfluxDBClient
 from influxdb import SeriesHelper
@@ -123,6 +124,18 @@ if print_usage or arg_error:
     print(" -U <name>: Set username for authentication")
     sys.exit(1 if arg_error else 0)
 
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
+def conn_error(msg):
+    # Connection errors that happen while running in an interval loop are
+    # not critical failures, because they can (usually) be retried, or
+    # because they will be recorded as dish state unavailable. They're still
+    # interesting, though, so print them even in non-verbose mode.
+    if sleep_time > 0:
+        print(msg)
+    else:
+        logging.error(msg)
+
 class DeviceStatusSeries(SeriesHelper):
     class Meta:
         series_name = "spacex.starlink.user_terminal.status"
@ -151,6 +164,7 @@ if "verify_ssl" in icargs and not icargs["verify_ssl"]:
influx_client = InfluxDBClient(**icargs) influx_client = InfluxDBClient(**icargs)
rc = 0
try: try:
dish_channel = None dish_channel = None
last_id = None last_id = None
@@ -182,6 +196,7 @@ try:
                 pop_ping_latency_ms=status.pop_ping_latency_ms,
                 currently_obstructed=status.obstruction_stats.currently_obstructed,
                 fraction_obstructed=status.obstruction_stats.fraction_obstructed)
+            pending += 1
             last_id = status.device_info.id
             last_failed = False
         except grpc.RpcError:
@@ -189,25 +204,36 @@ try:
                 dish_channel.close()
                 dish_channel = None
             if last_failed:
-                if last_id is not None:
-                    DeviceStatusSeries(id=last_id, state="DISH_UNREACHABLE")
+                if last_id is None:
+                    conn_error("Dish unreachable and ID unknown, so not recording state")
+                    # When not looping, report this as failure exit status
+                    rc = 1
+                else:
+                    if verbose:
+                        print("Dish unreachable")
+                    DeviceStatusSeries(id=last_id, state="DISH_UNREACHABLE")
+                    pending += 1
             else:
+                if verbose:
+                    print("Dish RPC channel error")
                 # Retry once, because the connection may have been lost while
                 # we were sleeping
                 last_failed = True
                 continue
-        pending = pending + 1
         if verbose:
-            print("Samples: " + str(pending))
-        count = count + 1
+            print("Samples queued: " + str(pending))
+        count += 1
         if count > 5:
             try:
-                DeviceStatusSeries.commit(influx_client)
-                if verbose:
-                    print("Wrote " + str(pending))
-                pending = 0
+                if pending:
+                    DeviceStatusSeries.commit(influx_client)
+                    rc = 0
+                    if verbose:
+                        print("Samples written: " + str(pending))
+                    pending = 0
             except Exception as e:
-                print("Failed to write: " + str(e))
+                conn_error("Failed to write: " + str(e))
+                rc = 1
             count = 0
         if sleep_time > 0:
             time.sleep(sleep_time)
@@ -216,12 +242,13 @@ try:
 finally:
     # Flush on error/exit
     try:
-        DeviceStatusSeries.commit(influx_client)
-        if verbose:
-            print("Wrote " + str(pending))
-        rc = 0
+        if pending:
+            DeviceStatusSeries.commit(influx_client)
+            rc = 0
+            if verbose:
+                print("Samples written: " + str(pending))
     except Exception as e:
-        print("Failed to write: " + str(e))
+        conn_error("Failed to write: " + str(e))
         rc = 1
     influx_client.close()
     if dish_channel is not None:
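The loop above queues points on the SeriesHelper and now only commits when something is pending, which is what the new rc and pending handling tracks. A condensed sketch of that queue-then-commit pattern; the class name, field/tag set, and connection parameters here are illustrative, not the script's real schema:

from influxdb import InfluxDBClient, SeriesHelper

influx_client = InfluxDBClient(host="localhost", database="example_db")

class ExampleStatusSeries(SeriesHelper):
    class Meta:
        series_name = "spacex.starlink.user_terminal.status"
        fields = ["state"]
        tags = ["id"]
        autocommit = False  # commit() decides when points are written

pending = 0
ExampleStatusSeries(id="example-dish-id", state="CONNECTED")  # queue one point
pending += 1

try:
    if pending:
        ExampleStatusSeries.commit(influx_client)
        pending = 0
except Exception as e:
    # In the real script this goes through conn_error() so a transient
    # failure inside the polling loop is not treated as fatal.
    print("Failed to write: " + str(e))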

View file

@@ -10,6 +10,7 @@
 import sys
 import getopt
+import logging
 
 try:
     import ssl
@@ -81,12 +82,14 @@ if print_usage or arg_error:
     print(" -U: Set username for authentication")
     sys.exit(1 if arg_error else 0)
 
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
 try:
     with grpc.insecure_channel("192.168.100.1:9200") as channel:
         stub = spacex.api.device.device_pb2_grpc.DeviceStub(channel)
         response = stub.Handle(spacex.api.device.device_pb2.Request(get_status={}))
 except grpc.RpcError:
-    print("Failed getting status info")
+    logging.error("Failed getting status info")
     sys.exit(1)
 
 status = response.dish_get_status
@@ -126,5 +129,5 @@ if username is not None:
 try:
     paho.mqtt.publish.multiple(msgs, client_id=status.device_info.id, **mqargs)
 except Exception as e:
-    print("Failed publishing to MQTT broker: " + str(e))
+    logging.error("Failed publishing to MQTT broker: " + str(e))
     sys.exit(1)

View file

@@ -16,6 +16,7 @@
 import datetime
 import sys
 import getopt
+import logging
 
 import starlink_json
@@ -65,6 +66,8 @@ if print_usage or arg_error:
     print(" -H: print CSV header instead of parsing file")
     sys.exit(1 if arg_error else 0)
 
+logging.basicConfig(format="%(levelname)s: %(message)s")
+
 g_fields, pd_fields, rl_fields = starlink_json.history_ping_field_names()
 
 if print_header:
@@ -82,11 +85,11 @@ if print_header:
 timestamp = datetime.datetime.utcnow()
 
-g_stats, pd_stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
-                                                               samples, verbose)
-
-if g_stats is None:
-    # verbose output already happened, so just bail.
+try:
+    g_stats, pd_stats, rl_stats = starlink_json.history_ping_stats(args[0] if args else "-",
+                                                                   samples, verbose)
+except starlink_json.JsonError as e:
+    logging.error("Failure getting ping stats: " + str(e))
     sys.exit(1)
 
 if verbose:

View file

@@ -82,6 +82,21 @@ import grpc
 import spacex.api.device.device_pb2
 import spacex.api.device.device_pb2_grpc
 
 
+class GrpcError(Exception):
+    """Provides error info when something went wrong with a gRPC call."""
+
+    def __init__(self, e, *args, **kwargs):
+        # grpc.RpcError is too verbose to print in whole, but it may also be
+        # a Call object, and that class has some minimally useful info.
+        if isinstance(e, grpc.Call):
+            msg = e.details()
+        elif isinstance(e, grpc.RpcError):
+            msg = "Unknown communication or service error"
+        else:
+            msg = str(e)
+        super().__init__(msg, *args, **kwargs)
+
+
 def get_status():
     """Fetch status data and return it in grpc structure format.
def get_status(): def get_status():
"""Fetch status data and return it in grpc structure format. """Fetch status data and return it in grpc structure format.
@@ -98,13 +113,16 @@ def get_id():
     Returns:
         A string identifying the Starlink user terminal reachable from the
-        local network, or None if no user terminal is currently reachable.
+        local network.
+
+    Raises:
+        GrpcError: No user terminal is currently reachable.
     """
     try:
         status = get_status()
         return status.device_info.id
-    except grpc.RpcError:
-        return None
+    except grpc.RpcError as e:
+        raise GrpcError(e)
 
 
 def history_ping_field_names():
     """Return the field names of the packet loss stats.
@@ -152,20 +170,18 @@ def history_ping_stats(parse_samples, verbose=False):
         verbose (bool): Optionally produce verbose output.
 
     Returns:
-        On success, a tuple with 3 dicts, the first mapping general stat names
-        to their values, the second mapping ping drop stat names to their
-        values and the third mapping ping drop run length stat names to their
-        values.
-
-        On failure, the tuple (None, None, None).
+        A tuple with 3 dicts, the first mapping general stat names to their
+        values, the second mapping ping drop stat names to their values and
+        the third mapping ping drop run length stat names to their values.
+
+    Raises:
+        GrpcError: Failed getting history info from the Starlink user
+            terminal.
     """
     try:
         history = get_history()
-    except grpc.RpcError:
-        if verbose:
-            # RpcError is too verbose to print the details.
-            print("Failed getting history")
-        return None, None, None
+    except grpc.RpcError as e:
+        raise GrpcError(e)
 
     # 'current' is the count of data samples written to the ring buffer,
     # irrespective of buffer wrap.
@@ -231,7 +247,7 @@ def history_ping_stats(parse_samples, verbose=False):
             count_unsched += 1
             total_unsched_drop += d
             if d >= 1:
-                count_full_unsched += d
+                count_full_unsched += 1
         # scheduled=false and obstructed=true do not ever appear to overlap,
         # but in case they do in the future, treat that as just unscheduled
         # in order to avoid double-counting it.
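From the caller's perspective, the new exception keeps only the short, useful part of the gRPC failure. A small usage sketch (the exact message depends on the underlying grpc.Call):

import starlink_grpc

try:
    dish_id = starlink_grpc.get_id()
except starlink_grpc.GrpcError as e:
    # str(e) is the short details() string from the underlying grpc.Call,
    # e.g. "failed to connect to all addresses", not the full RpcError repr.
    print("Dish ID lookup failed: " + str(e))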

View file

@@ -14,6 +14,11 @@ import sys
 from itertools import chain
 
 
+class JsonError(Exception):
+    """Provides error info when something went wrong with JSON parsing."""
+
+
 def history_ping_field_names():
     """Return the field names of the packet loss stats.
@@ -46,15 +51,16 @@ def get_history(filename):
     Args:
         filename (str): Filename from which to read JSON data, or "-" to read
             from standard input.
+
+    Raises:
+        Various exceptions depending on Python version: Failure to open or
+            read input or invalid JSON read on input.
     """
     if filename == "-":
         json_data = json.load(sys.stdin)
     else:
-        json_file = open(filename)
-        try:
+        with open(filename) as json_file:
             json_data = json.load(json_file)
-        finally:
-            json_file.close()
     return json_data["dishGetHistory"]
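A usage sketch for the JSON path after this change; the filename and sample count below are placeholders:

import logging
import sys

import starlink_json

logging.basicConfig(format="%(levelname)s: %(message)s")

try:
    g_stats, pd_stats, rl_stats = starlink_json.history_ping_stats("saved_history.json", 3600)
except starlink_json.JsonError as e:
    # history_ping_stats() now raises JsonError instead of returning
    # (None, None, None) on failure.
    logging.error("Failure getting ping stats: " + str(e))
    sys.exit(1)

print(g_stats)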
@@ -68,19 +74,19 @@ def history_ping_stats(filename, parse_samples, verbose=False):
         verbose (bool): Optionally produce verbose output.
 
     Returns:
-        On success, a tuple with 3 dicts, the first mapping general stat names
-        to their values, the second mapping ping drop stat names to their
-        values and the third mapping ping drop run length stat names to their
-        values.
-
-        On failure, the tuple (None, None, None).
+        A tuple with 3 dicts, the first mapping general stat names to their
+        values, the second mapping ping drop stat names to their values and
+        the third mapping ping drop run length stat names to their values.
+
+    Raises:
+        JsonError: Failure to open, read, or parse JSON on input.
     """
     try:
         history = get_history(filename)
+    except ValueError as e:
+        raise JsonError("Failed to parse JSON: " + str(e))
     except Exception as e:
-        if verbose:
-            print("Failed getting history: " + str(e))
-        return None, None, None
+        raise JsonError(e)
 
     # "current" is the count of data samples written to the ring buffer,
     # irrespective of buffer wrap.
@@ -100,13 +106,13 @@ def history_ping_stats(filename, parse_samples, verbose=False):
     # index to next data sample after the newest one.
     offset = current % samples
 
-    tot = 0
+    tot = 0.0
     count_full_drop = 0
     count_unsched = 0
-    total_unsched_drop = 0
+    total_unsched_drop = 0.0
     count_full_unsched = 0
     count_obstruct = 0
-    total_obstruct_drop = 0
+    total_obstruct_drop = 0.0
     count_full_obstruct = 0
 
     second_runs = [0] * 60
@@ -126,9 +132,10 @@ def history_ping_stats(filename, parse_samples, verbose=False):
     for i in sample_range:
         d = history["popPingDropRate"][i]
-        tot += d
         if d >= 1:
-            count_full_drop += d
+            # just in case...
+            d = 1
+            count_full_drop += 1
             run_length += 1
         elif run_length > 0:
             if init_run_length is None:
@@ -145,7 +152,7 @@ def history_ping_stats(filename, parse_samples, verbose=False):
             count_unsched += 1
             total_unsched_drop += d
             if d >= 1:
-                count_full_unsched += d
+                count_full_unsched += 1
         # scheduled=false and obstructed=true do not ever appear to overlap,
         # but in case they do in the future, treat that as just unscheduled
         # in order to avoid double-counting it.
@@ -153,7 +160,8 @@ def history_ping_stats(filename, parse_samples, verbose=False):
             count_obstruct += 1
             total_obstruct_drop += d
             if d >= 1:
-                count_full_obstruct += 1
+                count_full_obstruct += 1
+        tot += d
 
     # If the entire sample set is one big drop run, it will be both initial
     # fragment (continued from prior sample range) and final one (continued
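A standalone illustration of the per-sample accounting these hunks fix: the drop rate is clamped to 1 before it is counted, the "full drop" counters count samples (+= 1) instead of summing rates (+= d), and the running total accumulates the clamped value. The drop_rates list is made-up sample data:

drop_rates = [0.0, 0.25, 1.0, 1.0, 0.5]

tot = 0.0
count_full_drop = 0
for d in drop_rates:
    if d >= 1:
        # clamp, then count this sample as a full drop
        d = 1
        count_full_drop += 1
    tot += d

print("total drop:", tot)                         # 2.75
print("fully dropped samples:", count_full_drop)  # 2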