Import of the watch repository from Pebble

commit 3b92768480
Matthieu Jeanson, 2024-12-12 16:43:03 -08:00 (committed by Katharine Berry)
10334 changed files with 2564465 additions and 0 deletions

tools/activity/fft.py (new file)

@@ -0,0 +1,438 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################################
# Test FFT algorithm
#
# This is used to experiment with various step-tracking algorithms. It is a python implementation
# of an FFT as described in:
# "Real-valued Fast Fourier Transform Algorithm", from IEEE Transactions on Acoustics,
# Speech, and Signal Processing, Vol. ASSP-35, No. 6, June 1987
#
# The firmware uses this same algorithm, but implemented in C in the prv_fft_2radix_real()
# method of kraepelin_algorithm.c
##################################################################################################
import argparse
import os
import sys
import logging
import math
###########################################################################################
g_walk_10_steps = [
[-362, -861, 69],
[-309, -899, 45],
[-266, -904, 21],
[-242, -848, -134],
[-272, -839, 34],
[-207, -919, 14],
[-244, -879, 93],
[-238, -856, 91],
[-185, -883, 37],
[-217, -855, -156],
[-200, -883, 25],
[-154, -927, 42],
[-179, -935, 71],
[-184, -956, 32],
[-129, -999, 99],
[-195, -950, -112],
[-222, -969, -164],
[-351, -996, -190],
[-277, -1218, -259],
[-212, -1018, -250],
[-209, -812, -142],
[-182, -680, -200],
[-257, -642, -169],
[-269, -797, -289],
[-142, -1107, -330],
[-185, -909, -300],
[-229, -706, -155],
[-171, -750, -161],
[-181, -811, -218],
[-173, -845, -149],
[-118, -887, -126],
[-150, -871, -100],
[-164, -908, -146],
[-175, -958, -161],
[-231, -952, -113],
[-273, -1006, -205],
[-321, -1047, -351],
[-321, -1064, -300],
[-262, -945, -210],
[-298, -770, -124],
[-338, -772, 95],
[-325, -818, -179],
[-329, -780, -153],
[-280, -796, -151],
[-230, -755, -100],
[-234, -759, 44],
[-248, -807, 90],
[-217, -872, 79],
[-204, -887, 74],
[-189, -939, 78],
[-220, -1014, -129],
[-147, -1107, -129],
[-274, -1013, -158],
[-301, -1007, -258],
[-351, -1131, -346],
[-118, -1086, -355],
[-290, -716, -213],
[-288, -720, -290],
[-235, -825, -344],
[-179, -819, -243],
[-228, -670, -185],
[-125, -790, -145],
[-145, -795, -207],
[-152, -809, 76],
[-98, -871, -115],
[-89, -855, -111],
[-116, -879, 84],
[-161, -945, -172],
[-147, -1017, -173],
[-278, -1012, -146],
[-268, -1049, -247],
[-279, -1026, -260],
[-286, -958, -187],
[-288, -890, -167],
[-359, -873, -168],
[-324, -904, -147],
[-263, -804, -134],
[-214, -712, 37],
[-189, -698, 29],
[-183, -755, 74],
[-182, -841, 98],
[-115, -894, 73],
[-149, -857, 57],
[-93, -927, -68],
[-145, -988, -120],
[-112, -1095, -112],
[-201, -1059, -146],
[-278, -1104, -206],
[-284, -1204, -213],
[-214, -966, -254],
[-272, -730, -140],
[-233, -785, -252],
[-259, -813, -272],
[-156, -840, -205],
[-163, -765, -110],
[-165, -741, 97],
[-164, -791, 86],
[-99, -849, -69],
[-99, -820, -81],
[-94, -842, -37],
[-142, -881, -109],
[-153, -978, -155],
[-212, -934, 71],
[-341, -947, 99],
[-406, -1039, -283],
[-265, -1146, -206],
[-296, -979, -163],
[-345, -864, 98],
[-216, -907, 38],
[-242, -809, 47],
[-154, -736, 52],
[-137, -700, -101],
[-184, -743, -136],
[-191, -850, 86],
[-206, -883, 85],
[-194, -875, 48],
[-148, -937, 46],
[-193, -983, 31],
[-176, -1062, 43],
[-251, -1006, -114],
[-284, -1036, -192],
[-374, -1181, -248],
[-167, -1177, -271],
[-253, -794, -128],
[-285, -651, -129],
[-228, -757, -227],
[-260, -843, -201],
[-189, -899, -253],
[-212, -800, -136],
[-218, -728, -136],
[-177, -761, -129],
[-165, -806, -137],
[-157, -839, -122],
[-116, -899, -104],
[-191, -874, 77],
[-174, -911, 95],
[-193, -971, -147],
[-255, -961, -127],
[-222, -1052, -124],
[-333, -1021, -223],
[-245, -1018, -215],
[-269, -850, 91],
[-318, -754, -120],
[-335, -878, -199],
[-322, -986, -224],
[-192, -902, -179],
[-177, -712, 86],
[-196, -673, 88],
[-178, -751, -101],
[-182, -847, 70],
[-147, -909, -131],
[-170, -939, 43],
[-224, -994, 60],
[-189, -1051, 42],
[-242, -968, -183],
[-312, -978, -213],
[-317, -1298, -334],
[-184, -1131, -330],
[-287, -754, -141],
[-249, -773, -287],
[-166, -842, -297],
[-196, -742, -214],
[-163, -729, -198],
[-177, -757, -197],
[-174, -830, -155],
[-159, -860, -149],
[-145, -856, 72],
[-132, -849, 47],
[-145, -839, 62],
[-179, -843, 76],
[-163, -941, -114],
[-230, -963, -110],
]
##################################################################################################
def real_value_fft(x):
""" Real value FFT as described in Appendix of:
"Real-valued Fast Fourier Transform Algorithm", from IEEE Transactions on Acoustics, Speech,
and Signal Processing, Vol. ASSP-35, No. 6, June 1987
"""
# Make sure we have a power of 2 length input
n = len(x)
m = int(math.log(n, 2))
if (math.pow(2, m) != n):
raise RuntimeError("Length must be a power of 2")
    # The rest of the code assumes 1-based indexing (it comes from Fortran)
x = [0] + x
# ---------------------------------------------------------------------------------
# Digit reverse counter
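    # Swap x[i] with x[j], where j steps through the bit-reversed indices of i, so the
    # in-place butterflies below produce output in natural order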
j = 1
n1 = n - 1
for i in range(1, n1 + 1):
if (i < j):
xt = x[j]
x[j] = x[i]
x[i] = xt
        k = n // 2
        while (k < j):
            j = j - k
            k = k // 2
        j = j + k
# ---------------------------------------------------------------------------------
# Length 2 butterflies
for i in range(1, n + 1, 2):
xt = x[i]
x[i] = xt + x[i+1]
x[i+1] = xt - x[i+1]
# ---------------------------------------------------------------------------------
# Other butterflies
n2 = 1
for k in range(2, m + 1):
n4 = n2
n2 = 2 * n4
n1 = 2 * n2
e = 2 * math.pi / n1
for i in range(1, n+1, n1):
xt = x[i]
x[i] = xt + x[i + n2]
x[i + n2] = xt - x[i + n2]
x[i + n4 + n2] = -x[i + n4 + n2]
a = e
            # Inclusive upper bound: the Fortran loop is "DO 30 J = 1, N4-1"
            for j in range(1, n4):
i1 = i + j
i2 = i - j + n2
i3 = i + j + n2
i4 = i - j + n1
cc = math.cos(a)
ss = math.sin(a)
a = a + e
t1 = x[i3] * cc + x[i4] * ss
t2 = x[i3] * ss - x[i4] * cc
x[i4] = x[i2] - t2
x[i3] = -x[i2] - t2
x[i2] = x[i1] - t1
x[i1] = x[i1] + t1
return x[1:]
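###################################################################################################
def compare_with_numpy(x):
    """ Illustrative sanity check, assuming numpy is available (nothing else in this tool
    needs it): prints the packed output of real_value_fft() next to numpy's half-spectrum
    so the [Re(0), ..., Re(N/2), Im(N/2-1), ..., Im(1)] ordering can be checked by eye.
    Depending on sign convention, the imaginary parts may come out negated relative to
    numpy.
    """
    import numpy as np
    n = len(x)
    ours = real_value_fft(list(x))
    ref = np.fft.rfft(x)
    print("k: ours_re, numpy_re, ours_im, numpy_im")
    print("0: %f, %f" % (ours[0], ref[0].real))
    for k in range(1, n // 2):
        print("%d: %f, %f, %f, %f" % (k, ours[k], ref[k].real, ours[n - k], ref[k].imag))
    print("%d: %f, %f" % (n // 2, ours[n // 2], ref[n // 2].real))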
###################################################################################################
def compute_magnitude(x):
""" The real_value_fft() produces an array containing outputs in this order:
[Re(0), Re(1), ..., Re(N/2), Im(N/2-1), ..., Im(1)]
This method returns the magnitudes. The magnitude of term i is sqrt(Re(i)**2 + Im(i)**2)
"""
result = []
n = len(x)
real_idx = 0
im_idx = n - 1
result.append(x[real_idx])
real_idx += 1
    while real_idx <= n // 2 - 1:
mag = (x[real_idx]**2 + x[im_idx]**2) ** 0.5
result.append(mag)
real_idx += 1
im_idx -= 1
result.append(x[real_idx])
return result
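# For example, with n = 8 the packed input is [Re0, Re1, Re2, Re3, Re4, Im3, Im2, Im1] and
# compute_magnitude() returns [Re0, |X1|, |X2|, |X3|, Re4], where |Xk| = sqrt(Re(k)**2 + Im(k)**2)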
###################################################################################################
def apply_gaussian(x, width=0.1):
    """ Multiply x by a Gaussian window. Width is a fraction, like 0.1
    """
result = []
n = len(x)
    mid = n / 2.0
denominator = n**2 * width
for i in range(len(x)):
        # Debug output showing the window computation at each index
        print("%s %s %s %s" % (i - mid, (i - mid)**2, -1 * (i - mid)**2 / denominator,
                               math.exp(-1 * (i - mid)**2 / denominator)))
        g = math.exp(-1 * (i - mid)**2 / denominator)
result.append(g * x[i])
return result
###################################################################################################
def print_graph(x):
min_value = min(x)
max_value = max(x)
extent = max(abs(min_value), abs(max_value))
scale = 2 * extent
min_value = -extent
for i in range(len(x)):
        sys.stdout.write("%4d: %10.3f: " % (i, x[i]))
        position = int((x[i] - min_value) * 80 / scale)
        if position < 40:
            sys.stdout.write(' ' * position)
            print('*' * (40 - position))
        else:
            sys.stdout.write(' ' * 40)
            print('*' * (position - 40))
###################################################################################################
if __name__ == '__main__':
# Collect our command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', help="Turn on debug logging")
args = parser.parse_args()
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(level=level)
# -------------------------------------------------------------------------------------
# Constant signal
if 0:
input_len = 128
input = [1 for x in range(input_len)]
        print("\n############ INPUT ######################")
        print_graph(input)
        result = real_value_fft(input)
        print("\n############ RESULT ######################")
        print_graph(result)
# -------------------------------------------------------------------------------------
# N sine waves
if 0:
input_len = 128
freq = 7
input = [math.cos(float(x)/input_len * freq * 2 * math.pi) for x in range(input_len)]
        print("\n############ INPUT ######################")
        print_graph(input)
        print("\n############ GAUSSIAN OF INPUT ############")
        # input = apply_gaussian(input, 0.1)
        print_graph(input)
        result = real_value_fft(input)
        print("\n############ REAL, IMAG ######################")
        print_graph(result)
        print("\n############ MAGNITUDE ######################")
        mag = compute_magnitude(result)
        print_graph(mag)
# -------------------------------------------------------------------------------------
# Step data
if 1:
input_len = 128
raw_input = g_walk_10_steps[0:input_len]
x_data = [x for x, y, z in raw_input]
x_mean = sum(x_data) / len(x_data)
x_data = [x - x_mean for x in x_data]
y_data = [y for x, y, z in raw_input]
y_mean = sum(y_data) / len(y_data)
y_data = [y - y_mean for y in y_data]
z_data = [z for x, y, z in raw_input]
z_mean = sum(z_data) / len(z_data)
z_data = [z - z_mean for z in z_data]
        print("\n############ X ######################")
        print_graph(x_data)
        print("\n############ Y ######################")
        print_graph(y_data)
        print("\n############ Z ######################")
        print_graph(z_data)
input = []
for (x, y, z) in raw_input:
mag = x**2 + y**2 + z**2
mag = mag ** 0.5
input.append(mag)
mean_mag = sum(input) / len(input)
input = [x - mean_mag for x in input]
        print("\n############ INPUT ######################")
        # input = apply_gaussian(input)
        print_graph(input)
        result = real_value_fft(input)
        print("\n############ REAL, IMAG ######################")
        print_graph(result)
        print("\n############ MAGNITUDE ######################")
        mag = compute_magnitude(result)
        print_graph(mag)


@@ -0,0 +1,78 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################################
# Generate sample health activity blob
##################################################################################################
import argparse
import os
import sys
import logging
import math
import time
import struct
"""
typedef struct {
uint8_t steps; // # of steps in this minute
uint8_t orientation; // average orientation of the watch
uint8_t vmc; // vector magnitude count
} MinuteData;
typedef struct {
uint16_t version; // version, initial version is 1
uint16_t len; // length in bytes of blob, including this entire header
uint32_t time_utc; // UTC time of pebble
uint32_t time_local; // local time of pebble
uint16_t num_samples; // number of samples that follow
MinuteData samples[];
} Header;
"""
###################################################################################################
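# Illustrative round-trip decoder, sketched as an assumption for checking the layout
# documented above (it is not part of the original tool):
def decode_health_blob(blob):
    header_format = '<HHIIH'
    sample_format = '<BBB'
    version, blob_len, time_utc, time_local, num_samples = \
        struct.unpack_from(header_format, blob)
    offset = struct.calcsize(header_format)
    samples = []
    for _ in range(num_samples):
        # Each sample is (steps, orientation, vmc), per the MinuteData struct
        samples.append(struct.unpack_from(sample_format, blob, offset))
        offset += struct.calcsize(sample_format)
    return version, blob_len, time_utc, time_local, samples
###################################################################################################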
if __name__ == '__main__':
# Collect our command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', help="Turn on debug logging")
args = parser.parse_args()
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(level=level)
sample_format = '<BBB'
header_format = '<HHIIH'
num_samples = 10
blob = struct.pack(
header_format,
1,
struct.calcsize(header_format) + num_samples * struct.calcsize(sample_format),
int(time.time()),
int(time.time()),
num_samples)
for i in range(num_samples):
blob += struct.pack(sample_format,
30 + (i % 5),
4,
50 + (i % 4))
    with open('health_blob.bin', "wb") as out:
        out.write(blob)


@@ -0,0 +1,780 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Parse the data logging records generated by the activity service
This tool accepts binary data files obtained from the data logging service, or log files
captured from a Pebble support request. If given a log file, we look for log lines that start with
"RAW:". These contain the same content as the data logging records but base64 encoded.
Example log file entries from a Pebble support request
2015-09-02 14:34:06:000GMT activity.c:706
RAW: AQABAG2m5lUDcBkZjP///4v///+M////i////43///+L////jf///47///+L////kf///5P///8=
2015-09-02 14:34:06:000GMT activity.c:710
RAW: iP///4n///+P////kv///5H///+O////iv///4z///+J////jf///4r///+F////gv///3////8=
To extract the binary data logging records from the Pebble, use the "pebble data-logging" utility:
pebble data-logging disable-sends # so that records don't get sent to phone
# ... Run the watch for a while to generate the data logging records
pebble data-logging list # List all data logging sessions
pebble data-logging download --session_id=XXX # where XXX is the session_id of the
# activity service (uuid == 0,
# tag = ALG_DLS_TAG)
This file will generate a .c file containing the sample data captured in the data logging records.
This .c file can in turn be incorporated into the algorithm unit tests.
"""
import argparse
import base64
import datetime
import gzip
import json
from jira.client import JIRA
import logging
import os
import re
import struct
import sys
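# As an illustration of the "RAW:" format described above (a one-liner sketch, not how the
# tool itself does it), the base64 payload of a single log line can be recovered with:
#   payload = base64.b64decode(line.split("RAW: ", 1)[1])
# ParseAccelSamplesFile.parse_log_file() below does the equivalent for every matching line
# and concatenates the binary results before parsing.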
# Create a ~/.triage JSON file and override/configure these keys:
SETTINGS = {
# JIRA server url / username / password
'server_url': 'https://pebbletechnology.atlassian.net',
'user': None,
'password': None,
# Local path to download issue attachments to
'download_path': '/tmp/',
}
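# Example ~/.triage contents (the values are placeholders):
#   { "user": "$USER", "password": "$PASSWORD" }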
SLEEP_DEFAULT_EXPECTED_TEXT = \
""" //> TEST_VERSION 3
//> TEST_TOTAL -1
//> TEST_TOTAL_MIN -1
//> TEST_TOTAL_MAX -1
//> TEST_DEEP -1
//> TEST_DEEP_MIN -1
//> TEST_DEEP_MAX -1
//> TEST_START_AT -1
//> TEST_START_AT_MIN -1
//> TEST_START_AT_MAX -1
//> TEST_END_AT -1
//> TEST_END_AT_MIN -1
//> TEST_END_AT_MAX -1
//> TEST_CUR_STATE_ELAPSED -1
//> TEST_CUR_STATE_ELAPSED_MIN -1
//> TEST_CUR_STATE_ELAPSED_MAX -1
//> TEST_IN_SLEEP 0
//> TEST_IN_SLEEP_MIN 0
//> TEST_IN_SLEEP_MAX 0
//> TEST_IN_DEEP_SLEEP 0
//> TEST_IN_DEEP_SLEEP_MIN 0
//> TEST_IN_DEEP_SLEEP_MAX 0
//> TEST_WEIGHT 1.0
"""
#############################################################################################
#############################################################################################
class JIRASupport(object):
DESYM_EXT = ".desym"
def __init__(self, issue_id):
self.issue_id = issue_id
def issue_url(self, issue_id):
return "%s/browse/%s" % (SETTINGS["server_url"], issue_id)
def issue_download_path(self, issue_id):
return os.path.join(SETTINGS["download_path"], issue_id)
def download_path(self):
return os.path.join(SETTINGS["download_path"])
def load_user_settings(self):
settings_path = '~/.triage'
        try:
            with open(os.path.expanduser(settings_path), 'rb') as user_settings_file:
                user_settings = json.load(user_settings_file)
        except IOError as e:
            if e.errno == 2:
                logging.error("""Please create %s with credentials: """
                              """'{ "user": "$USER", "password": "$PASSWORD" }'""",
                              settings_path)
                return
            raise
        SETTINGS.update(user_settings)
def create_jira(self):
self.load_user_settings()
creds = (SETTINGS["user"], SETTINGS["password"])
return JIRA(options={'server': SETTINGS["server_url"]},
basic_auth=creds)
def download_attachments(self, issue_path, issue):
attachments = issue.fields.attachment
if not attachments:
raise Exception("No attachments found.")
local_paths = []
for attachment in attachments:
filename = attachment.raw['filename']
local_path = os.path.join(issue_path, filename)
local_paths.append(local_path)
if os.path.exists(local_path):
logging.debug("Skipping %s: file already exists.", filename)
continue
            logging.info("Downloading %s..." % filename)
            with open(local_path, 'wb') as attachment_file:
                attachment_file.write(attachment.get())
return set(local_paths)
def unzip_android_logs(self, paths):
ungz_paths = []
        gz_paths = [path for path in paths if re.search(r"\.gz$", path)]
for gz_path in gz_paths:
with gzip.open(gz_path, 'rb') as f_in:
ungz_path = os.path.splitext(gz_path)[0]
with open(ungz_path, 'wb') as f_out:
try:
f_out.writelines(f_in)
except IOError:
logging.error("Error writing unzipped android log")
finally:
f_out.close()
ungz_paths.append(ungz_path)
return set(ungz_paths)
def get_watch_logs(self, local_attachment_paths):
""" Goes through the list of attachment paths returns a list containing only the
watch logs.
"""
watch_logs = []
got_device_logs = False
for path in sorted(local_attachment_paths):
            # iOS uses "watch_logs...", Android <2.1 uses "pebble.log", and Android >=2.1 (Holo)
            # uses "device-logs.log"
if "watch_logs" not in path and "pebble.log" not in path and "device-logs" not in path:
# Not a watch_logs... file
continue
if "device-logs" in path:
got_device_logs = True
elif "pebble.log" in path and got_device_logs:
# Newer Android requests (Holo) have watch logs in "device-logs", not pebble.log
continue
if ".gz" in path:
# Don't try to process pebble.log.gz
continue
if JIRASupport.DESYM_EXT in path:
# This watch_logs file is a desymmed log file
continue
watch_logs.append(path)
return watch_logs
def all_logs(self):
""" Download all watch log files and return a list of their filenames """
jira = self.create_jira()
issue = jira.issue(self.issue_id)
# Create /tmp/ISSUE_ID to download attachments to:
issue_path = self.issue_download_path(self.issue_id)
if not os.path.exists(issue_path):
logging.info("Creating %s" % issue_path)
os.makedirs(issue_path)
else:
logging.info("Using %s" % issue_path)
# Download attachments:
try:
local_attachment_paths = self.download_attachments(issue_path, issue)
        except Exception:
logging.info("No attachments to process")
return []
# Android attaches a .gz with all the logs:
android_log_paths = self.unzip_android_logs(local_attachment_paths)
local_attachment_paths = local_attachment_paths | android_log_paths
return self.get_watch_logs(local_attachment_paths)
#############################################################################################
class ParseAccelSamplesFile(object):
""" Parse raw accel data and produce a text output file from it. The raw binary
format is documented as the ActivityRawSamplesRecord structure in activity.h
For 'c' format, we want this type of format
AccelRawData *activity_sample_42(int *len) {
static AccelRawData samples[] = {
{ -362, -861, 69},
{ -309, -899, 45},
...
{ -163, -941, -114},
{ -230, -963, -110},
};
*len = ARRAY_LENGTH(samples);
return samples;
}
"""
ACTIVITY_RAW_SAMPLE_FLAG_FIRST_RECORD = 0x01 # Set for first record of session
ACTIVITY_RAW_SAMPLE_FLAG_LAST_RECORD = 0x02 # set for last record of session
ACTIVITY_RAW_SAMPLES_VERSION_1 = 1
ACTIVITY_RAW_SAMPLES_VERSION_2 = 2
ACTIVITY_RAW_SAMPLES_MAX_ENTRIES = 25
ACTIVITY_RAW_SAMPLES_PER_SECOND = 25
#############################################################################################
def __init__(self, image, bin_file, sample_prefix, format, exp, exp_min, exp_max):
self.bin_file = bin_file
self.format = format
self.expected = (exp, exp_min, exp_max)
self.sample_prefix = sample_prefix
self.image = bytearray(image)
self.offset = 0
self.session_start_time_local = None
self.session_id = None
self.session_num_samples = 0
#############################################################################################
def _output_start_of_session(self):
if self.format == 'c':
# Starting a new sample
print("\n\n")
print("// ----------------------------------------------------------------")
print("// Sample captured: %s local" %
(datetime.datetime.utcfromtimestamp(self.session_start_time_local)))
print("AccelRawData *activity_sample_%s_%d(int *len) {" % (self.sample_prefix,
self.session_id))
print(" // The unit tests parse the //> TEST_.* lines below for test values")
print(" //> TEST_NAME %s_%d" % (self.sample_prefix, self.session_id))
print(" //> TEST_EXPECTED %d" % (self.expected[0]))
print(" //> TEST_EXPECTED_MIN %d" % (self.expected[1]))
print(" //> TEST_EXPECTED_MAX %d" % (self.expected[2]))
print(" //> TEST_WEIGHT 1.0")
print(" static AccelRawData samples[] = {")
else:
print("\n\n\n")
print("###################################################################")
print("##### Start of sample %d. Place this section in a new file ########" %
(self.session_id))
print("###################################################################")
#############################################################################################
def _output_sample(self, x, y, z):
if self.format == 'c':
print(" { %d, %d, %d}," % (x, y, z))
else:
print("%d, %d, %d" % (x, y, z))
#############################################################################################
def _output_end_of_session(self):
if self.session_id is not None:
if self.format == 'c':
print(" };")
print(" *len = ARRAY_LENGTH(samples);")
print(" return samples;")
print("}\n")
else:
print("\n")
#############################################################################################
def _parse_binary_item(self):
hdr_format = '<HHIBBBB'
pack_size = struct.calcsize(hdr_format)
        version, session_id, session_start_time_local, flags, record_len, num_samples, \
            num_entries = struct.unpack_from(hdr_format, self.image, self.offset)
self.offset += pack_size
logging.debug("Got timestamp: %s, session %d, flags: 0x%x, num_entries: %d, "
"num_samples: %d"
% (datetime.datetime.utcfromtimestamp(session_start_time_local),
session_id, flags, num_entries, num_samples))
if (version != self.ACTIVITY_RAW_SAMPLES_VERSION_1 and
version != self.ACTIVITY_RAW_SAMPLES_VERSION_2):
raise RuntimeError("Invalid record version: %d" % (version))
if session_id != self.session_id:
# New session about to start
# End the previous one
self._output_end_of_session()
self.session_start_time_local = session_start_time_local
self.session_id = session_id
self.session_num_samples = 0
if not (flags & self.ACTIVITY_RAW_SAMPLE_FLAG_FIRST_RECORD):
print("WARNING: Invalid record detected. Start of new session (%d) without"
" the first record flag" % (session_id))
self._output_start_of_session()
# Extract each sample
sample_format = '<I'
pack_size = struct.calcsize(sample_format)
num_samples_decoded = 0
offset = self.offset
samples_per_minute = 60 * self.ACTIVITY_RAW_SAMPLES_PER_SECOND
for i in range(num_entries):
encoded, = struct.unpack_from(sample_format, self.image, offset)
offset += pack_size
logging.debug("Got encoded sample %d: 0x%x" % (i, encoded))
if ((self.session_num_samples % 25) == 0):
if self.format == 'c':
print(" // %d seconds" % (self.session_num_samples / 25))
# Decode it
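            # Each 32-bit word holds a run length plus packed x/y/z components: version 1
            # uses a 5-bit run length with 9-bit components, version 2 a 2-bit run length
            # with 10-bit components. Components were stored right-shifted by 3 bits (hence
            # the << 3 here) and are sign-extended back to negative values below.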
if version == self.ACTIVITY_RAW_SAMPLES_VERSION_1:
run_size = encoded >> 27
x = ((encoded >> 18) & 0x1FF) << 3
if (x & 0x800):
x = -1 * (0x1000 - x)
y = ((encoded >> 9) & 0x1FF) << 3
if (y & 0x800):
y = -1 * (0x1000 - y)
z = (encoded & 0x1FF) << 3
if (z & 0x800):
z = -1 * (0x1000 - z)
elif version == self.ACTIVITY_RAW_SAMPLES_VERSION_2:
run_size = encoded >> 30
x = ((encoded >> 20) & 0x3FF) << 3
if (x & 0x1000):
x = -1 * (0x2000 - x)
y = ((encoded >> 10) & 0x3FF) << 3
if (y & 0x1000):
y = -1 * (0x2000 - y)
z = (encoded & 0x3FF) << 3
if (z & 0x1000):
z = -1 * (0x2000 - z)
else:
raise RuntimeError("Unimplemented version")
for i in range(run_size):
if (self.format == 'c' and (self.session_num_samples % samples_per_minute) == 0):
print(" // elapsed: %d minutes"
% (self.session_num_samples / samples_per_minute))
self._output_sample(x, y, z)
num_samples_decoded += 1
self.session_num_samples += 1
self.offset += self.ACTIVITY_RAW_SAMPLES_MAX_ENTRIES * pack_size
# Make sure we got the expected # of samples
if num_samples != num_samples_decoded:
            raise RuntimeError("Decoding error. Expected to find %d samples, but found %d"
                               % (num_samples, num_samples_decoded))
#############################################################################################
def parse_log_file(self):
# Occasionally, the log file will start with the 2nd line of a 2 line accel sample,
# so if we can't parse from there, we try from the next line down
second_line_offset = None
if not self.bin_file:
# Convert text log file to binary format
bin_data = bytearray()
lines = self.image.split('\n')
prefix = "RAW: "
line_num = 0
for line in lines:
if prefix in line:
base64_str = line[line.index(prefix) + len(prefix):]
content = base64.b64decode(base64_str)
bin_data += content
if line_num == 0:
second_line_offset = len(bin_data)
line_num += 1
self.image = bin_data
# Parse the binary blob
image_len = len(self.image)
first_line = True
while self.offset < image_len:
try:
self._parse_binary_item()
except:
if first_line:
self.offset = 0
self.image = self.image[second_line_offset:]
image_len = len(self.image)
else:
raise
first_line = False
self._output_end_of_session()
#############################################################################################
class ParseMinuteStatsFile(object):
#############################################################################################
def __init__(self, image, bin_file, sample_prefix, format, start_idx):
self.bin_file = bin_file
self.sample_prefix = sample_prefix
self.format = format
self.image = bytearray(image)
self.offset = 0
self.session_minute_idx = 0
self.global_minute_idx = 0
self.session_start_time_utc = None
self.session_elapsed_time = 0
self.start_idx = start_idx
#############################################################################################
def _finish_session(self):
if self.session_start_time_utc is not None:
print(" };")
print(" *len = ARRAY_LENGTH(samples);")
print(" return samples;")
print("}\n")
self.session_start_time_utc = None
#############################################################################################
def _parse_version_2_binary_item(self):
""" For 'c' format, we want this type of format
AlgDlsMinuteData *activity_sample_42(int *len) {
// list of: {steps, orientation, vmc, light}
static AlgDlsMinuteData samples[] = {
{2, 3, 4},
{5, 6, 7},
...
{8, 9, 10},
{11, 12, 13},
};
*len = ARRAY_LENGTH(samples);
return samples;
}
"""
version_pack_size = 2
hdr_format = '<HIIH'
hdr_pack_size = struct.calcsize(hdr_format)
blob_len, time_utc, time_local, num_samples = \
struct.unpack_from(hdr_format, self.image, self.offset)
self.offset += hdr_pack_size
logging.debug("Got blob: local time: %s, utc time: %s, num_samples: %d" %
(datetime.datetime.utcfromtimestamp(time_local),
datetime.datetime.utcfromtimestamp(time_utc), num_samples))
# See if this is a continuation of a previous session, or a new one
if self.session_start_time_utc is not None \
and abs(self.session_start_time_utc + self.session_elapsed_time - time_utc) > 60:
self._finish_session()
if self.global_minute_idx >= self.start_idx:
printing_on = True
else:
printing_on = False
timestamp = datetime.datetime.utcfromtimestamp(time_local)
if self.session_start_time_utc is None:
if self.format == 'c':
self.session_minute_idx = 0
# Starting a new sample
print("\n\n")
print("// ----------------------------------------------------------------")
print("// Sample captured at: %s local, %s GMT" %
(timestamp, datetime.datetime.utcfromtimestamp(time_utc)))
print("AlgDlsMinuteData *activity_sample_%s(int *len) {"
% (timestamp.strftime('%Y_%m_%d_%H_%M_%S')))
print(" // The unit tests parse the //> TEST_.* lines below for test values")
print(" //> TEST_NAME %s" % (self.sample_prefix))
print(SLEEP_DEFAULT_EXPECTED_TEXT)
                print(" // list of: {steps, orientation, vmc, light}")
print(" static AlgDlsMinuteData samples[] = {")
else:
raise RuntimeError("Only 'c' format is supported for minute stats")
else:
if printing_on:
print(" // %d: Local time: %s" % (self.session_minute_idx,
timestamp.strftime('%Y-%m-%d %I:%M:%S %p')))
# Save the params from the header
self.session_start_time_utc = time_utc
self.session_elapsed_time = num_samples * 60
self.session_minute_idx += num_samples
self.global_minute_idx += num_samples
# Extract each sample
sample_format = '<BBHB'
sample_pack_size = struct.calcsize(sample_format)
expected_len = version_pack_size + hdr_pack_size + num_samples * sample_pack_size
if blob_len != expected_len:
raise RuntimeError("Invalid len in header (%d). Expected %d" % (blob_len, expected_len))
for i in range(num_samples):
steps, orient, vmc, light = struct.unpack_from(sample_format, self.image, self.offset)
self.offset += sample_pack_size
logging.debug("Got sample %d: %d, %d, %d, %d" % (i, steps, orient, vmc, light))
if printing_on:
print(" { %d, 0x%x, %d, 0x%x}," % (steps, orient, vmc, light))
if not printing_on:
self.session_minute_idx = 0
#############################################################################################
def _parse_version_3_to_5_binary_item(self, version):
""" For 'c' format, we want this type of format
AlgDlsMinuteData *activity_sample_42(int *len) {
// list of: {steps, orientation, vmc, light}
static AlgDlsMinuteData samples[] = {
{2, 3, 4, 5},
{5, 6, 7, 8},
...
{8, 9, 10, 11},
{11, 12, 13, 14},
};
*len = ARRAY_LENGTH(samples);
return samples;
}
"""
# NOTE: the version was already pulled out
hdr_format = '<Ib'
hdr_pack_size = struct.calcsize(hdr_format)
time_utc, time_local_offset_15_min = struct.unpack_from(hdr_format, self.image, self.offset)
self.offset += hdr_pack_size
time_local = time_utc + (time_local_offset_15_min * 15 * 60)
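        # The offset is stored in 15-minute increments; e.g. an offset of -32 means
        # UTC-8:00, since -32 * 15 * 60 = -28800 seconds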
# Structure of each minute sample
if version <= 4:
# steps, orientation, vmc. light
sample_format = '<BBHB'
else:
# steps, orientation, vmc, light, flags (plugged_in)
sample_format = '<BBHBB'
sample_pack_size = struct.calcsize(sample_format)
num_samples = 15
# Version 4 and 5 have sample_size and num_samples
if version >= 4:
hdr_aux_format = '<bb'
sample_size, num_samples = struct.unpack_from(hdr_aux_format, self.image, self.offset)
self.offset += struct.calcsize(hdr_aux_format)
if (num_samples != 15):
raise RuntimeError("Invalid num_samples value of %d" % (num_samples))
            if (sample_size != sample_pack_size):
                raise RuntimeError("Invalid sample size of %d" % (sample_size))
# Print header info
logging.debug("Got blob: local time: %s, utc time: %s, num_samples: %d" %
(datetime.datetime.utcfromtimestamp(time_local),
datetime.datetime.utcfromtimestamp(time_utc), num_samples))
# See if this is a continuation of a previous session, or a new one
if (self.session_start_time_utc is not None and
abs(self.session_start_time_utc + self.session_elapsed_time - time_utc) > 60):
self._finish_session()
if self.global_minute_idx >= self.start_idx:
printing_on = True
else:
printing_on = False
timestamp = datetime.datetime.utcfromtimestamp(time_local)
if self.session_start_time_utc is None:
if self.format == 'c':
self.session_minute_idx = 0
# Starting a new sample
print("\n\n")
print("// ----------------------------------------------------------------")
print("// Sample captured at: %s local, %s GMT" %
(timestamp, datetime.datetime.utcfromtimestamp(time_utc)))
print("AlgDlsMinuteData *activity_sample_%s(int *len) {"
% (timestamp.strftime('%Y_%m_%d_%H_%M_%S')))
print(" // The unit tests parse the //> TEST_.* lines below for test values")
print(" //> TEST_NAME %s" % (self.sample_prefix))
print(SLEEP_DEFAULT_EXPECTED_TEXT)
print(" // list of: {steps, orientation, vmc, light, plugged_in}")
print(" static AlgDlsMinuteData samples[] = {")
else:
raise RuntimeError("Only 'c' format is supported for minute stats")
else:
if printing_on:
print(" // %d: Local time: %s" % (self.session_minute_idx,
timestamp.strftime('%Y-%m-%d %I:%M:%S %p')))
# Save the params from the header
self.session_start_time_utc = time_utc
self.session_elapsed_time = num_samples * 60
self.session_minute_idx += num_samples
self.global_minute_idx += num_samples
# Extract each sample
for i in range(num_samples):
if version <= 4:
steps, orient, vmc, light = struct.unpack_from(sample_format, self.image,
self.offset)
flags = 0
else:
steps, orient, vmc, light, flags = struct.unpack_from(sample_format, self.image,
self.offset)
self.offset += sample_pack_size
logging.debug("Got sample %d: %d, %d, %d, %d, 0x%x" % (i, steps, orient, vmc, light,
flags))
plugged_in = flags & 0x01
if printing_on:
print(" { %d, 0x%x, %d, %d, %d}," % (steps, orient, vmc, light, plugged_in))
if not printing_on:
self.session_minute_idx = 0
#############################################################################################
def _parse_binary_item(self):
""" For 'c' format, we want this type of format
AlgDlsMinuteData *activity_sample_42(int *len) {
static AlgDlsMinuteData samples[] = {
{2, 3, 4},
{5, 6, 7},
...
{8, 9, 10},
{11, 12, 13},
};
*len = ARRAY_LENGTH(samples);
return samples;
}
"""
# Get the version first
version_format = '<H'
version_pack_size = struct.calcsize(version_format)
version, = \
struct.unpack_from(version_format, self.image, self.offset)
self.offset += version_pack_size
# This is version 2 of AlgDlsRecordHdr
if version == 2:
self._parse_version_2_binary_item()
        # This is version 3, 4, or 5 of AlgDlsRecordHdr
elif (version >= 3) and (version <= 5):
self._parse_version_3_to_5_binary_item(version)
else:
raise RuntimeError("This blob has version %d, which is unrecognized" % (version))
#############################################################################################
def parse_log_file(self):
if not self.bin_file:
# Convert text log file to binary format
bin_data = bytearray()
lines = self.image.split('\n')
prefix = "SLP: "
for line in lines:
if prefix in line:
logging.debug("Converting %s to binary" % (line))
base64_str = line[line.index(prefix) + len(prefix):]
content = base64.b64decode(base64_str)
bin_data += content
self.image = bin_data
# Parse the binary blob
image_len = len(self.image)
while self.offset < image_len:
self._parse_binary_item()
self._finish_session()
##################################################################################################
if __name__ == '__main__':
# Collect our command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('infile', help="The input file. This can be a .bin file obtained using the "
                        "'pebble data-logging download' command, a .txt or .log file from a "
                        "support request, or a JIRA ticket number like 'PBL-25667' (no extension)")
parser.add_argument('--input', choices=['minute_stats', 'accel_samples'],
default='minute_stats',
help="The type of data logging records being fed in."
" 'minute_stats' are generated on the watch with a data logging tag of 81, "
" 'accel_samples' with a tag of 82.")
parser.add_argument('--output', choices=['c', 'pebble-tool'], default='c',
help="The format of the output")
    parser.add_argument('--start', type=int, default=0,
                        help="Start dumping records from this offset. This is helpful if you "
                             "get a huge log file and only want the end.")
parser.add_argument('--name_prefix', default="walk",
help="Each sample's name in the generated output will begin with this "
"prefix")
    parser.add_argument('--expected', type=int, default=-1,
                        help="When generating accel sample files, embed this into the meta-data as "
                             "the expected number of steps")
    parser.add_argument('--expected_min', type=int, default=-1,
                        help="When generating accel sample files, embed this into the meta-data as "
                             "the acceptable minimum number of steps")
    parser.add_argument('--expected_max', type=int, default=-1,
                        help="When generating accel sample files, embed this into the meta-data as "
                             "the acceptable maximum number of steps")
parser.add_argument('--debug', action='store_true', help="Turn on debug logging")
args = parser.parse_args()
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(level=level)
from_jira = False
if args.infile.endswith('.bin'):
input_is_bin = True
infiles = [args.infile]
elif args.infile.endswith('.txt') or args.infile.endswith('.log'):
input_is_bin = False
infiles = [args.infile]
elif '.' in args.infile:
        raise RuntimeError("Invalid input file type. Must end with .txt, .log, or .bin")
else:
input_is_bin = False
jira = JIRASupport(args.infile)
infiles = jira.all_logs()
from_jira = True
# Figure out a good prefix to use for each sample name
if from_jira:
sample_prefix = args.infile.lower()
else:
sample_prefix = os.path.splitext(os.path.basename(args.infile))[0].lower()
for file in infiles:
if args.input == 'accel_samples':
sample_prefix = "%s_%s" % (args.name_prefix, sample_prefix.replace('-', '_'))
parser = ParseAccelSamplesFile(image=open(file, 'r').read(), bin_file=input_is_bin,
sample_prefix=sample_prefix, format=args.output,
exp=args.expected, exp_min=args.expected_min,
exp_max=args.expected_max)
else:
sample_prefix = "%s" % (sample_prefix.replace('-', '_'))
parser = ParseMinuteStatsFile(image=open(file, 'r').read(), bin_file=input_is_bin,
sample_prefix=sample_prefix, format=args.output,
start_idx=args.start)
parser.parse_log_file()
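# Example invocations (illustrative; the script filename below is a stand-in for this
# file's actual name):
#   python parse_activity_logs.py PBL-25667 --input accel_samples --expected 100
#   python parse_activity_logs.py session_81.bin --input minute_stats --start 500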


@@ -0,0 +1,43 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Plot the stats contained in the .csv file generated by the test_kraepelin_algorithm
unit test (when STATS_FILE_NAME is defined). This variant plots a hardcoded snapshot of
such values rather than reading a .csv file.
"""
import argparse
import csv
import datetime
import json
import logging
import os
import struct
import sys
import matplotlib.pyplot as pyplot
##################################################################################################
if __name__ == '__main__':
values = [73, 75, 39, 41, 90, 128, 105, 156, 212, 23, 92, 78, 57, 46, 44, 52, 31, 26, 23, 13,
22, 11, 20, 25, 12, 13, 10, 25, 17, 23, 16, 15, 12, 20, 12, 21, 40, 38, 20, 21, 21,
41, 52, 35, 33, 23, 26, 21, 32, 23, 20, 16, 24, 23, 40, 46, 89, 152, 88, 33, 53, 11,
36, 45]
x_axis = range(64)
x_labels = [str(x) for x in x_axis]
pyplot.bar(x_axis, values, align='center')
pyplot.xticks(x_axis, x_labels, rotation='vertical')
pyplot.xlim(x_axis[0] - .5, x_axis[-1] + .5)
pyplot.show()


@@ -0,0 +1,78 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Plot the stats contained in the .csv file generated by the test_kraepelin_algorithm
unit test (when STATS_FILE_NAME is defined).
"""
import argparse
import csv
import datetime
import json
import logging
import os
import struct
import sys
import matplotlib.pyplot as pyplot
##################################################################################################
def plot_stats(args, rows, stat_name):
# Get the vmc and score for each of the stepping epochs
stepping_vmcs = []
stepping_scores = []
non_stepping_vmcs = []
non_stepping_scores = []
for row in rows:
if int(row['epoch_type']) == 0:
non_stepping_vmcs.append(int(row['vmc']))
non_stepping_scores.append(int(row[stat_name]))
elif int(row['epoch_type']) == 2:
stepping_vmcs.append(int(row['vmc']))
stepping_scores.append(int(row[stat_name]))
pyplot.plot(stepping_vmcs, stepping_scores, 'go',
non_stepping_vmcs, non_stepping_scores, 'ro')
pyplot.show()
##################################################################################################
if __name__ == '__main__':
# Collect our command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infile', help="The input csv file")
parser.add_argument('--plot', choices=['score_0', 'score_lf', 'total'],
default='score_0',
help="Which metric to plot against vmc")
parser.add_argument('--debug', action='store_true', help="Turn on debug logging")
args = parser.parse_args()
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(level=level)
# Read in the csv file
col_names = None
rows = []
    with open(args.infile, 'r', newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if reader.line_num == 1:
col_names = [x.strip() for x in row]
else:
rows.append(dict(zip(col_names, row)))
# Plot now
plot_stats(args, rows, args.plot)