Import of the watch repository from Pebble

Matthieu Jeanson 2024-12-12 16:43:03 -08:00 committed by Katharine Berry
commit 3b92768480
10334 changed files with 2564465 additions and 0 deletions

waftools/__init__.py Normal file (+14)

@@ -0,0 +1,14 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

waftools/asm.py Normal file (+24)

@@ -0,0 +1,24 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from waflib import Task
from waflib.TaskGen import extension
class asm(Task.Task):
color = 'BLUE'
run_str = '${AS} ${ASFLAGS} ${DEFINES_ST:DEFINES} -c ${SRC} -o ${TGT}'
@extension('.s', '.S')
def asm_hook(self,node):
return self.create_compiled_task('asm',node)
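A minimal wscript sketch of the hook in use (file names are illustrative): once this tool is loaded, .s/.S sources listed alongside C sources are routed to the asm task defined above.

def build(bld):
    # 'startup.S' is picked up by asm_hook via the @extension decorator
    bld.program(source=['startup.S', 'main.c'], target='firmware')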

waftools/binary_header.py Normal file (+163)

@@ -0,0 +1,163 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import os
import re
import sparse_length_encoding
from waflib import Task, TaskGen, Utils, Node, Errors
class binary_header(Task.Task):
"""
Create a header file containing an array with contents from a binary file.
"""
def run(self):
if getattr(self.generator, 'hex', False):
# Input file is hexadecimal ASCII characters with whitespace
text = self.inputs[0].read(
encoding=getattr(self.generator, 'encoding', 'ISO8859-1'))
# Strip all whitespace so that binascii is happy
text = ''.join(text.split())
code = binascii.unhexlify(text)
else:
code = self.inputs[0].read('rb')
array_name = getattr(self.generator, 'array_name', None)
if not array_name:
array_name = re.sub(r'[^A-Za-z0-9]', '_', self.inputs[0].name)
if getattr(self.generator, 'compressed', False):
encoded_code = ''.join(sparse_length_encoding.encode(code))
# verify that it was encoded correctly
if ''.join(sparse_length_encoding.decode(encoded_code)) != code:
raise Errors.WafError('encoding error')
code = encoded_code
output = ['#pragma once', '#include <stdint.h>']
output += ['static const uint8_t %s[] = {' % array_name]
line = []
for n, b in enumerate(code):
line += ['0x%.2x,' % ord(b)]
if n % 16 == 15:
output += [''.join(line)]
line = []
if line:
output += [''.join(line)]
output += ['};', '']
self.outputs[0].write(
'\n'.join(output),
encoding=getattr(self.generator, 'encoding', 'ISO8859-1'))
self.generator.bld.raw_deps[self.uid()] = self.dep_vars = 'array_name'
if getattr(self.generator, 'chmod', None):
os.chmod(self.outputs[0].abspath(), self.generator.chmod)
def sig_vars(self):
dependent_generator_vars = ['hex', 'encoding', 'array_name',
'compressed', 'chmod']
vars = []
for k in dependent_generator_vars:
try:
vars.append((k, getattr(self.generator, k)))
except AttributeError:
pass
self.m.update(Utils.h_list(vars))
return self.m.digest()
@TaskGen.feature('binary_header')
@TaskGen.before_method('process_source', 'process_rule')
def process_binary_header(self):
"""
Define a transformation that converts the contents of *source* files into
C header *target* files::
def build(bld):
bld(
features='binary_header',
source='foo.bin',
target='foo.auto.h',
array_name='s_some_array',
compressed=True
)
bld(
features='binary_header',
source='bar.hex',
target='bar.auto.h',
hex=True
)
If the *hex* parameter is True, the *source* files are read in an ASCII
hexadecimal format, where each byte is represented by a pair of hexadecimal
digits with optional whitespace. If *hex* is False or not specified, the
file is treated as a raw binary file.
If the *compressed* parameter is True, the *source* files are compressed with
sparse length encoding (see waftools/sparse_length_encoding.py).
The name of the array variable defaults to the source file name with all
characters that are invalid in C identifiers replaced with underscores. The name
can be explicitly specified by setting the *array_name* parameter.
This method overrides the processing by
:py:meth:`waflib.TaskGen.process_source`.
"""
src = Utils.to_list(getattr(self, 'source', []))
if isinstance(src, Node.Node):
src = [src]
tgt = Utils.to_list(getattr(self, 'target', []))
if isinstance(tgt, Node.Node):
tgt = [tgt]
if len(src) != len(tgt):
raise Errors.WafError('invalid number of source/target for %r' % self)
for x, y in zip(src, tgt):
if not x or not y:
raise Errors.WafError('null source or target for %r' % self)
a, b = None, None
if isinstance(x, str) and isinstance(y, str) and x == y:
a = self.path.find_node(x)
b = self.path.get_bld().make_node(y)
if not os.path.isfile(b.abspath()):
b.sig = None
b.parent.mkdir()
else:
if isinstance(x, str):
a = self.path.find_resource(x)
elif isinstance(x, Node.Node):
a = x
if isinstance(y, str):
b = self.path.find_or_declare(y)
elif isinstance(y, Node.Node):
b = y
if not a:
raise Errors.WafError('could not find %r for %r' % (x, self))
has_constraints = False
tsk = self.create_task('binary_header', a, b)
for k in ('after', 'before', 'ext_in', 'ext_out'):
val = getattr(self, k, None)
if val:
has_constraints = True
setattr(tsk, k, val)
tsk.before = [k for k in ('c', 'cxx') if k in Task.classes]
self.source = []
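For a sense of the generated artifact: a hypothetical three-byte input with array_name 's_blob' would make run() above emit a header along these lines (sixteen bytes per line):

#pragma once
#include <stdint.h>
static const uint8_t s_blob[] = {
0xde,0xad,0xbe,
};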


@@ -0,0 +1,49 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adds `-include` flags for a list of files to CFLAGS and ASFLAGS, by adding an
optional attribute `inject_include_files`.
"""
from waflib.Node import Node
from waflib.TaskGen import feature, after_method
from waflib.Utils import def_attrs, to_list
@feature('c', 'asm')
@after_method('create_compiled_task')
def process_include_files(self):
def_attrs(self, inject_include_files=None)
if not self.inject_include_files:
return
include_flags = []
for include_file in to_list(self.inject_include_files):
if isinstance(include_file, Node):
node = include_file
elif isinstance(include_file, basestring):
node = self.path.find_node(include_file)
if not node:
self.bld.fatal('%s does not exist.' % include_file)
else:
self.bld.fatal('Expecting str or Node in '
'`inject_include_files` list')
include_file_path = node.abspath()
include_flags.append('-include%s' % include_file_path)
self.env.append_unique('CFLAGS', include_flags)
self.env.append_unique('ASFLAGS', include_flags)
for s in self.source:
self.bld.add_manual_dependency(s, node)
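A hedged sketch of the attribute in use (header name illustrative); every compile in the generator then gets the corresponding -include flag:

def build(bld):
    bld(features='c cprogram',
        source='main.c',
        target='app',
        # Expands to -include<abspath of build_config.h> on CFLAGS/ASFLAGS
        inject_include_files=['build_config.h'])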


@@ -0,0 +1,33 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tool that invokes the C preprocessor with any type of file.
"""
# FIXME: convert this from a rule to a task
def c_preproc(task):
args = {
'CC': task.generator.env.CC[0],
'CFLAGS': ' '.join(task.generator.cflags),
'SRC': task.inputs[0].abspath(),
'TGT': task.outputs[0].abspath(),
}
return task.exec_command(
'{CC} -E -P -c {CFLAGS} "{SRC}" -o "{TGT}"'.format(**args))
def configure(ctx):
pass
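Since this is still a rule, a project wscript would wire it up roughly as follows (file names illustrative). Note the rule reads `cflags` off the task generator, so it must be passed explicitly:

def build(bld):
    bld(rule=c_preproc,
        source='stm32f4.ld.S',  # template to run through the preprocessor
        target='stm32f4.ld',
        cflags=['-DFLASH_SIZE=512K'])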


@@ -0,0 +1,73 @@
#!/usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding: utf-8
# Christoph Koke, 2013
"""Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html"""
import json
import os
from waflib import Logs, TaskGen, Task
from waflib.Tools import c, cxx
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
"Add a compilation database entry for compiled tasks"
try:
clang_db = self.bld.clang_compilation_database_tasks
except AttributeError:
clang_db = self.bld.clang_compilation_database_tasks = []
self.bld.add_post_fun(write_compilation_database)
for task in getattr(self, 'compiled_tasks', []):
if isinstance(task, (c.c, cxx.cxx)):
clang_db.append(task)
def write_compilation_database(ctx):
"Write the clang compilation database as json"
database_file = ctx.bldnode.make_node('compile_commands.json')
file_path = str(database_file.path_from(ctx.path))
if not os.path.exists(file_path):
with open(file_path, 'w') as f:
f.write('[]')
Logs.info("Store compile commands in %s" % file_path)
clang_db = dict((x["file"], x) for x in json.load(database_file))
for task in getattr(ctx, 'clang_compilation_database_tasks', []):
try:
cmd = task.last_cmd
except AttributeError:
continue
filename = task.inputs[0].abspath()
entry = {
"directory" : getattr(task, 'cwd', ctx.variant_dir),
"command" : " ".join(cmd),
"file" : filename,
}
clang_db[filename] = entry
database_file.write(json.dumps(clang_db.values(), indent=2))
def options(opt):
"options for clang_compilation_database"
pass
def configure(cfg):
"configure for clang_compilation_database"
pass
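Usage is implicit: loading the tool is enough. A sketch, assuming the tool lives under waftools/ as in this commit:

def configure(conf):
    conf.load('clang_compilation_database', tooldir='waftools')
    # Any subsequent `waf build` then writes build/compile_commands.json
    # via the post-build hook registered above.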

waftools/compress.py Normal file (+21)

@@ -0,0 +1,21 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compress(task):
cmd = ['cp', task.inputs[0].abspath(), task.inputs[0].get_bld().abspath()]
task.exec_command(cmd)
cmd = ['xz', '--keep', '--check=crc32', '--lzma2=dict=4KiB', task.inputs[0].get_bld().abspath()]
task.exec_command(cmd)
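A sketch of how the rule might be wired up (file names illustrative); `xz --keep` writes the .xz next to the copied input:

def build(bld):
    bld(rule=compress,
        source='system_resources.pbpack',
        target='system_resources.pbpack.xz')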


@@ -0,0 +1,22 @@
/*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
int main(int argc, char **argv) {
printf("Hello World.\n");
return 0;
}

waftools/emscripten.py Normal file (+197)

@@ -0,0 +1,197 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Waftool that wraps the 'cprogram' feature and extends it with
Emscripten-specific variables. Example:
bld.program(source=sources,
target=target,
emx_pre_js_files=[], # list of nodes used with --pre-js
emx_post_js_files=[], # list of nodes used with --post-js
emx_exported_functions=node, # node to use with EXPORTED_FUNCTIONS
emx_other_settings=[], # list of -s settings
emx_embed_files=[pbpack], # list of nodes used with --embed-file
)
Also adds these optional env variables:
bld.env.EMX_PRE_JS_FILES = []
bld.env.EMX_POST_JS_FILES = []
bld.env.EMX_EXPORTED_FUNCTIONS = []
bld.env.EMX_OTHER_SETTINGS = []
bld.env.EMX_EMBED_FILES = []
bld.env.EMCC_DEBUG = 2
bld.env.EMCC_CORES = 1
"""
import os
from waflib import Logs, Task, TaskGen
# Inspired by https://github.com/waf-project/waf/blob/4ff5b8b7a74dd2ad23600ed7af6a505b90235387/playground/strip/strip.py
def wrap_cprogram_task_class():
classname = 'cprogram'
orig_cls = Task.classes[classname]
emx_cls = type(classname, (orig_cls,), {
'run_str': '${CC} ${CFLAGS} ${DEFINES_ST:DEFINES} ${CPPPATH_ST:INCPATHS} ${SRC} -o ${TGT[0].abspath()} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${EMCC_SETTINGS}',
'ext_out': ['.js'],
# *waf* env vars that affect the output and thus should trigger a rebuild.
# This is by no means a complete list, but just the stuff we use today.
'vars': [
'EMCC_DEBUG',
'EMCC_CORES',
'EMCC_SETTINGS',
'LINKDEPS'
],
'color': 'ORANGE',
})
wrapper_cls = type(classname, (emx_cls,), {})
def init(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
# Set relevant OS environment variables:
self.env.env = {}
self.env.env.update(os.environ)
for key in ['EMCC_DEBUG', 'EMCC_CORES', 'EM_CACHE']:
if self.env[key]: # If not explicitly set, an empty list is returned
self.env.env[key] = str(self.env[key])
emx_cls.__init__ = init
def run(self):
if self.env.CC != 'emcc':
return orig_cls.run(self)
return emx_cls.run(self)
wrapper_cls.run = run
cache_primer_node_by_cache_path = {}
def add_cache_primer_node_if_needed(bld, env):
def get_cache_path(env):
return env.EM_CACHE if env.EM_CACHE else '~/.emscripten_cache'
cache_path = get_cache_path(env)
existing_primer_node = \
cache_primer_node_by_cache_path.get(cache_path, None)
if existing_primer_node:
return existing_primer_node
# Build a tiny "hello world" C program to prime the caches:
primer_node = bld.path.get_bld().make_node('emscripten-cache-primer.js')
source_node = bld.path.find_node('waftools/emscripten-cache-primer-main.c')
primer_env = env.derive().detach()
# Force -O2, this will cause optimizer.exe (part of cached files) to be
# compiled:
primer_env['CFLAGS'] = filter(
lambda flag: flag not in ['-O0', '-O1', '-O3', '-Oz', '-Os'],
primer_env['CFLAGS']
)
primer_env['CFLAGS'].append('-O2')
bld.program(
source=[source_node],
target=primer_node,
env=primer_env)
cache_primer_node_by_cache_path[cache_path] = primer_node
return primer_node
@TaskGen.feature('cprogram')
@TaskGen.after_method('apply_link')
def process_emscripten_cprogram_link_args(self):
task = self.link_task
task.env.EMCC_SETTINGS = []
def add_emcc_settings(*args):
for val_or_node in args:
if isinstance(val_or_node, basestring):
val = val_or_node
else:
val = val_or_node.abspath()
task.dep_nodes.append(val_or_node)
task.env.EMCC_SETTINGS.append(val)
def get_rule_and_env_values(var_name):
# Rule values first:
vals = getattr(self, var_name.lower(), [])
# Add env values to the end:
vals.extend(getattr(self.env, var_name.upper()))
return vals
def get_single_rule_or_env_value(var_name):
val = getattr(self, var_name.lower(), None)
if val:
return val
return getattr(self.env, var_name.upper(), None)
for node in get_rule_and_env_values('emx_pre_js_files'):
add_emcc_settings('--pre-js', node)
for node in get_rule_and_env_values('emx_post_js_files'):
add_emcc_settings('--post-js', node)
transform_js_node_and_args = \
get_single_rule_or_env_value('emx_transform_js_node_and_args')
if transform_js_node_and_args:
add_emcc_settings('--js-transform', *transform_js_node_and_args)
exported_functions = get_single_rule_or_env_value('emx_exported_functions')
if exported_functions:
exported_func_path = self.emx_exported_functions.abspath()
add_emcc_settings(
'-s', 'EXPORTED_FUNCTIONS=@{}'.format(exported_func_path))
task.dep_nodes.append(self.emx_exported_functions)
for node in get_rule_and_env_values('emx_embed_files'):
add_emcc_settings('--embed-file', node)
for s in get_rule_and_env_values('emx_other_settings'):
add_emcc_settings('-s', s)
# Emscripten implicitly regenerates caches (libc.bc, dlmalloc.bc,
# struct_info.compiled.json and optimizer.exe) as needed.
# When running multiple instantiations of emcc in parallel, this is
# problematic because they will each race to generate the caches,
# using the same locations for writing/using/executing them.
# See also https://github.com/kripken/emscripten/issues/4151
if self.env.CC == 'emcc':
primer_node = add_cache_primer_node_if_needed(self.bld, self.env)
if task.outputs[0] != primer_node:
task.dep_nodes.append(primer_node)
def build(bld):
wrap_cprogram_task_class()
def configure(conf):
conf.find_program('emcc', errmsg='emscripten is not installed')
conf.find_program('em-config', var='EM-CONFIG')
conf.env.EMSCRIPTEN_ROOT = conf.cmd_and_log([
'em-config', 'EMSCRIPTEN_ROOT'
]).rstrip()
Logs.pprint(
'YELLOW', 'Emscripten path is {}'.format(conf.env.EMSCRIPTEN_ROOT))
conf.env.CC = 'emcc'
conf.env.LINK_CC = 'emcc'
conf.env.AR = 'emar'
# Don't look at the host system headers:
conf.env.CFLAGS.extend([
'-nostdinc',
'-Xclang', '-nobuiltininc',
'-Xclang', '-nostdsysteminc'
])


@@ -0,0 +1,28 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define a __FILE_NAME__ macro to expand to the filename of the C/C++ source,
stripping the other path components.
"""
from waflib.TaskGen import feature, after_method
@feature('c')
@after_method('create_compiled_task')
def file_name_c_define(self):
for task in self.tasks:
if len(task.inputs) > 0:
task.env.append_value(
'DEFINES', '__FILE_NAME__="%s"' % task.inputs[0].name)
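No per-target wiring is needed once the tool is loaded; in a sketch like the one below (names illustrative), compiling main.c gets -D__FILE_NAME__="main.c" added automatically, which log macros can use instead of the full __FILE__ path:

def build(bld):
    bld.program(source='main.c', target='app')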


@@ -0,0 +1,47 @@
#!/usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from waflib import Logs
from tools.log_hashing.check_elf_log_strings import check_dict_log_strings
from tools.log_hashing.newlogging import get_log_dict_from_file
def wafrule(task):
elf_filename = task.inputs[0].abspath()
log_strings_json_filename = task.outputs[0].abspath()
return generate_log_strings_json(elf_filename, log_strings_json_filename)
def generate_log_strings_json(elf_filename, log_strings_json_filename):
log_dict = get_log_dict_from_file(elf_filename)
if not log_dict:
error = 'Unable to get log strings from {}'.format(elf_filename)
Logs.pprint('RED', error)
return error
# Confirm that the log strings satisfy the rules
output = check_dict_log_strings(log_dict)
if output:
Logs.pprint('RED', output)
return 'NewLogging string formatting error'
# Create log_strings.json
with open(log_strings_json_filename, "w") as json_file:
json.dump(log_dict, json_file, indent=2, sort_keys=True)


@@ -0,0 +1,48 @@
#!/usr/bin/python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tools.timezones
from resources.types.resource_definition import ResourceDefinition
from resources.types.resource_object import ResourceObject
import StringIO
def wafrule(task):
olson_database = task.inputs[0].abspath()
reso = generate_resource_object(olson_database)
reso.dump(task.outputs[0])
def generate_resource_object(olson_database):
zoneinfo_list = tools.timezones.build_zoneinfo_list(olson_database)
dstrule_list = tools.timezones.dstrules_parse(olson_database)
zonelink_list = tools.timezones.zonelink_parse(olson_database)
print "{} {} {}".format(len(zoneinfo_list),
len(dstrule_list),
len(zonelink_list))
data_file = StringIO.StringIO()
tools.timezones.zoneinfo_to_bin(zoneinfo_list, dstrule_list, zonelink_list, data_file)
reso = ResourceObject(
ResourceDefinition('raw', 'TIMEZONE_DATABASE', None),
data_file.getvalue())
return reso

waftools/gettext.py Normal file (+75)

@@ -0,0 +1,75 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from waflib import Configure, TaskGen, Task
GETTEXT_KEYWORDS = ['i18n_noop',
'i18n_get', 'i18n_get_with_buffer',
'sys_i18n_get_with_buffer',
'i18n_ctx_noop:1c,2',
'i18n_ctx_get:1c,2', 'i18n_ctx_get_with_buffer:1c,2']
def configure(conf):
conf.find_program('xgettext', exts="", errmsg="""
=======================================================================
`gettext` might not be installed properly.
- If using a Mac, try running `brew install gettext; brew link gettext --force`
- If using Linux, and you fix this error, please insert solution here
=======================================================================""")
conf.find_program('msgcat')
class xgettext(Task.Task):
run_str = ('${XGETTEXT} -c/ -k --from-code=UTF-8 --language=C ' +
' '.join('--keyword=' + word for word in GETTEXT_KEYWORDS) +
' -o ${TGT[0].abspath()} ${SRC}')
class msgcat(Task.Task):
run_str = '${MSGCAT} ${SRC} -o ${TGT}'
@TaskGen.before('process_source')
@TaskGen.feature('gettext')
def do_gettext(self):
sources = [src for src in self.to_nodes(self.source)
if src.suffix() not in ('.s', '.S')]
# There is a convenient to_nodes method for sources (that already exist),
# but no equivalent for targets (files which don't exist yet).
if isinstance(self.target, str):
target = self.path.find_or_declare(self.target)
else:
target = self.target
self.create_task('xgettext', src=sources, tgt=target)
# Bypass the execution of process_source
self.source = []
@TaskGen.before('process_source')
@TaskGen.feature('msgcat')
def do_msgcat(self):
if isinstance(self.target, str):
target = self.path.find_or_declare(self.target)
else:
target = self.target
self.create_task('msgcat', src=self.to_nodes(self.source), tgt=target)
# Bypass the execution of process_source
self.source = []
@Configure.conf
def gettext(self, *args, **kwargs):
kwargs['features'] = 'gettext'
return self(*args, **kwargs)
@Configure.conf
def msgcat(self, *args, **kwargs):
kwargs['features'] = 'msgcat'
return self(*args, **kwargs)
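The two @Configure.conf helpers expose the features directly on the build context; a sketch (paths illustrative):

def build(bld):
    bld.gettext(source=bld.path.ant_glob('src/**/*.c'), target='fw.pot')
    bld.msgcat(source=['fw.pot', 'apps.pot'], target='all.pot')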

waftools/gitinfo.py Normal file (+55)

@@ -0,0 +1,55 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import waflib.Context
import waflib.Logs
def get_git_revision(ctx):
try:
tag = ctx.cmd_and_log(['git', 'describe'], quiet=waflib.Context.BOTH).strip()
commit = ctx.cmd_and_log(['git', 'rev-parse', 'HEAD'], quiet=waflib.Context.BOTH).strip()
timestamp = ctx.cmd_and_log(['git', 'log', '-1', '--format=%ct', 'HEAD'], quiet=waflib.Context.BOTH).strip()
except Exception:
waflib.Logs.warn('get_git_revision: unable to determine git revision')
tag, commit, timestamp = ("?", "?", "1")
# Validate that git tag follows the required form:
# See https://github.com/pebble/tintin/wiki/Firmware,-PRF-&-Bootloader-Versions
# Note: version_regex.groups() returns sequence ('0', '0', '0', 'suffix'):
version_regex = re.search("^v(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:(?:-)(.+))?$", tag)
if version_regex:
# Get version numbers from version_regex.groups() sequence and replace None values with 0
# e.g. v2-beta11 => ('2', None, None, 'beta11') => ('2', '0', '0')
version = [x if x else '0' for x in version_regex.groups()[:3]]
else:
waflib.Logs.warn('get_git_revision: Invalid git tag! '
'Must follow this form: `v0[.0[.0]][-suffix]`')
version = ['0', '0', '0', 'unknown']
# Used for the pebble_pipeline payload: generate a string that contains everything
# after the minor version. Force-include patch as 0 if it doesn't exist.
patch_verbose = str(version[2])
str_after_patch = version_regex.groups()[3] if version_regex else None
if str_after_patch:
patch_verbose += '-' + str_after_patch
return {'TAG': tag,
'COMMIT': commit,
'TIMESTAMP': timestamp,
'MAJOR_VERSION': version[0],
'MINOR_VERSION': version[1],
'PATCH_VERSION': version[2],
'MAJOR_MINOR_PATCH_STRING' : ".".join(version[0:3]),
"PATCH_VERBOSE_STRING": patch_verbose}

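A typical call site would fold the returned dict into build defines at configure time; a sketch (define name illustrative):

def configure(conf):
    info = get_git_revision(conf)
    conf.env.append_value('DEFINES',
                          'FW_VERSION="%s"' % info['MAJOR_MINOR_PATCH_STRING'])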

@@ -0,0 +1,60 @@
#!/usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from waflib import Logs
from zlib import crc32
from shutil import copyfile
import struct
from elftools.elf.elffile import ELFFile
TEXT_SECTION_NAME = ".text"
TEXT_CRC32_SECTION_NAME = ".text_crc32"
def wafrule(task):
in_file = task.inputs[0].abspath()
out_file = task.outputs[0].abspath()
text_data = get_text_section_data_from_file(in_file)
if not text_data:
error = 'Unable to get {} section from {}'.format(TEXT_SECTION_NAME, in_file)
Logs.pprint('RED', error)
return error
crc = crc32(text_data) & 0xFFFFFFFF
offset = get_text_crc32_section_offset_from_file(in_file)
if not offset:
error = 'Unable to get {} section from {}'.format(TEXT_CRC32_SECTION_NAME, in_file)
Logs.pprint('RED', error)
return error
copyfile(in_file, out_file)
with open(out_file, 'rb+') as file:
file.seek(offset)
file.write(struct.pack('<I', crc))
def get_text_section_data_from_file(filename):
with open(filename, 'rb') as file:
section = ELFFile(file).get_section_by_name(TEXT_SECTION_NAME)
return section.data() if section is not None else None
def get_text_crc32_section_offset_from_file(filename):
with open(filename, 'rb') as file:
section = ELFFile(file).get_section_by_name(TEXT_CRC32_SECTION_NAME)
return section['sh_offset'] if section is not None else None
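Used as a rule, the wiring would look roughly like this (file names illustrative); the output is a copy of the input ELF with the CRC patched into .text_crc32:

def build(bld):
    bld(rule=wafrule, source='boot.elf', target='boot.crc.elf')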


@@ -0,0 +1,202 @@
#!/usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import xml.dom.minidom
"""
Based on the following understanding of what Jenkins can parse for JUnit XML files.
<?xml version="1.0" encoding="utf-8"?>
<testsuites errors="1" failures="1" tests="3" time="45">
<testsuite errors="1" failures="1" hostname="localhost" id="0" name="base_test_1"
package="testdb" tests="3" timestamp="2012-11-15T01:02:29">
<properties>
<property name="assert-passed" value="1"/>
</properties>
<testcase classname="testdb.directory" name="001-passed-test" time="10"/>
<testcase classname="testdb.directory" name="002-failed-test" time="20">
<failure message="Assertion FAILED: some failed assert" type="failure">
the output of the testcase
</failure>
</testcase>
<testcase classname="package.directory" name="003-errord-test" time="15">
<error message="Assertion ERROR: some error assert" type="error">
the output of the testcase
</error>
</testcase>
<testcase classname="testdb.directory" name="003-passed-test" time="10">
<system-out>
I am system output
</system-out>
<system-err>
I am the error output
</system-err>
</testcase>
</testsuite>
</testsuites>
"""
class TestSuite(object):
"""Suite of test cases"""
def __init__(self, name, test_cases=None, hostname=None, id=None, \
package=None, timestamp=None, properties=None):
self.name = name
if not test_cases:
test_cases = []
try:
iter(test_cases)
except TypeError:
raise Exception('test_cases must be a list of test cases')
self.test_cases = test_cases
self.hostname = hostname
self.id = id
self.package = package
self.timestamp = timestamp
self.properties = properties
def build_xml_doc(self):
"""Builds the XML document for the JUnit test suite"""
# build the test suite element
test_suite_attributes = dict()
test_suite_attributes['name'] = str(self.name)
test_suite_attributes['failures'] = str(len([c for c in self.test_cases if c.is_failure()]))
test_suite_attributes['errors'] = str(len([c for c in self.test_cases if c.is_error()]))
test_suite_attributes['tests'] = str(len(self.test_cases))
if self.hostname:
test_suite_attributes['hostname'] = str(self.hostname)
if self.id:
test_suite_attributes['id'] = str(self.id)
if self.package:
test_suite_attributes['package'] = str(self.package)
if self.timestamp:
test_suite_attributes['timestamp'] = str(self.timestamp)
xml_element = ET.Element("testsuite", test_suite_attributes)
# add any properties
if self.properties:
props_element = ET.SubElement(xml_element, "properties")
for k, v in self.properties.items():
attrs = { 'name' : str(k), 'value' : str(v) }
ET.SubElement(props_element, "property", attrs)
# test cases
for case in self.test_cases:
test_case_attributes = dict()
test_case_attributes['name'] = str(case.name)
if case.elapsed_sec:
test_case_attributes['time'] = "%f" % case.elapsed_sec
if case.classname:
test_case_attributes['classname'] = str(case.classname)
test_case_element = ET.SubElement(xml_element, "testcase", test_case_attributes)
# failures
if case.is_failure():
attrs = { 'type' : 'failure' }
if case.failure_message:
attrs['message'] = case.failure_message
failure_element = ET.Element("failure", attrs)
if case.failure_output:
failure_element.text = case.failure_output
test_case_element.append(failure_element)
# errors
if case.is_error():
attrs = { 'type' : 'error' }
if case.error_message:
attrs['message'] = case.error_message
error_element = ET.Element("error", attrs)
if case.error_output:
error_element.text = case.error_output
test_case_element.append(error_element)
# test stdout
if case.stdout:
stdout_element = ET.Element("system-out")
stdout_element.text = case.stdout
test_case_element.append(stdout_element)
# test stderr
if case.stderr:
stderr_element = ET.Element("system-err")
stderr_element.text = case.stderr
test_case_element.append(stderr_element)
return xml_element
@staticmethod
def to_xml_string(test_suites, prettyprint=True):
"""Returns the string representation of the JUnit XML document"""
try:
iter(test_suites)
except TypeError:
raise Exception('test_suites must be a list of test suites')
xml_element = ET.Element("testsuites")
for ts in test_suites:
xml_element.append(ts.build_xml_doc())
xml_string = ET.tostring(xml_element)
if prettyprint:
try:
xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml()
except:
pass
return xml_string
@staticmethod
def to_file(file_descriptor, test_suites, prettyprint=True):
"""Writes the JUnit XML document to file"""
file_descriptor.write(TestSuite.to_xml_string(test_suites, prettyprint))
class TestCase(object):
"""A JUnit test case with a result and possibly some stdout or stderr"""
def __init__(self, name, classname=None, elapsed_sec=None, stdout=None, stderr=None):
self.name = name
self.elapsed_sec = elapsed_sec
self.stdout = stdout
self.stderr = stderr
self.classname = classname
self.error_message = None
self.error_output = None
self.failure_message = None
self.failure_output = None
def add_error_info(self, message=None, output=None):
"""Adds an error message, output, or both to the test case"""
if message:
self.error_message = message
if output:
self.error_output = output
def add_failure_info(self, message=None, output=None):
"""Adds a failure message, output, or both to the test case"""
if message:
self.failure_message = message
if output:
self.failure_output = output
def is_failure(self):
"""returns true if this test case is a failure"""
return self.failure_output or self.failure_message
def is_error(self):
"""returns true if this test case is an error"""
return self.error_output or self.error_message
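A minimal sketch of producing a report with these classes (names and timings illustrative):

passed = TestCase('test_clock', classname='fw.time', elapsed_sec=0.01)
failed = TestCase('test_alarm', classname='fw.time', elapsed_sec=0.02)
failed.add_failure_info(message='expected 1 alarm, got 0')
with open('junit.xml', 'w') as f:
    TestSuite.to_file(f, [TestSuite('unit-tests', [passed, failed])])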

waftools/lcov_info_parser.py Executable file (+118)

@@ -0,0 +1,118 @@
#!/usr/bin/python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
def fail(msg, waf_bld=None):
''' Convenience function to fail with `exit(-1)` or, if available, waf's `bld.fatal()`. '''
if waf_bld:
waf_bld.fatal(msg)
else:
print(msg)
exit(-1)
class LcovInfoFileRecord(object):
''' A convenience class for processing lcov.info file records for Arcanist. '''
def __init__(self, file_path, root_to_strip=None, waf_bld=None):
# Create a "coverage list" as long as the number of lines in the source file where the index
# is the line number (e.g. index 0 is line 1) and the element represents the Arcanist
# coverage character. Initialize all elements as 'N' for "Not executable".
try:
self.coverage_list = ['N' for i in range(sum(1 for line in open(file_path)))]
except IOError:
fail('Failed to open source file path to count total number of lines: %s' % file_path,
waf_bld=waf_bld)
# If provided, strip a root path from the front of the source file path because Arcanist
# expects source file paths relative to the root of the repo
if root_to_strip:
self.file_path = file_path.replace(root_to_strip, '')
else:
self.file_path = file_path
def process_da_line_info(self, da_line_info):
da_line_info_data = da_line_info.split(',')
if len(da_line_info_data) != 2:
print('Skipping lcov.info da line data due to parsing failure: %s' % da_line_info)
return
# Extract the line number and execution count, converting them from strings to integers
line_number, execution_count = map(int, da_line_info_data)
# Line numbers start with 1 so subtract 1 before recording coverage status
self.coverage_list[line_number - 1] = 'C' if execution_count > 0 else 'U'
def get_arcanist_coverage_string(self):
# Arcanist expects a coverage string where character n represents line n as follows:
# - 'N': Not executable. Comment or whitespace to be ignored for coverage.
# - 'C': Covered. This line has test coverage.
# - 'U': Uncovered. This line is executable but has no test coverage.
# - 'X': Unreachable. (If detectable) Unreachable code.
# See https://secure.phabricator.com/book/phabricator/article/arcanist_coverage/
return ''.join(self.coverage_list)
def get_arcanist_coverage_dictionary(self):
# See https://secure.phabricator.com/book/phabricator/article/arcanist_coverage/
return {'file_path': self.file_path,
'coverage_string': self.get_arcanist_coverage_string()}
def parse_lcov_info_for_arcanist(lcov_info_file_path, root_to_strip=None, waf_bld=None):
''' Parse an lcov.info file and return a list of Arcanist code coverage dictionaries. '''
coverage_results = []
with open(lcov_info_file_path) as lcov_info_file:
current_file_record = None
for line in lcov_info_file.read().splitlines():
# We only care about a subset of the lcov.info file, namely:
# 1. "SF" lines denote a source file path and the start of its record
# 2. "DA" lines denote a tuple of "<LINE_NUMBER>,<EXECUTION_COUNT>"
# 3. "end_of_record" lines denote the end of a record
if line == 'end_of_record':
if current_file_record is None:
fail('Saw "end_of_record" before start of a file record', waf_bld=waf_bld)
# "end_of_record" denotes the end of a record, so add the record to our results
coverage_results.append(current_file_record.get_arcanist_coverage_dictionary())
# Reset our data
current_file_record = None
else:
# Other lcov.info lines look like "<INFO_TYPE>:<INFO>", so first parse for this data
line_data = line.split(':')
if len(line_data) != 2:
print('Skipping unrecognized lcov.info line: %s' % line)
continue
info_type, info = line_data
if info_type == 'SF':
if current_file_record is not None:
fail('Saw start of new file record before previous file record ended',
waf_bld=waf_bld)
current_file_record = LcovInfoFileRecord(info,
root_to_strip=root_to_strip,
waf_bld=waf_bld)
elif info_type == 'DA':
if current_file_record is None:
fail('Saw line data before a file record started', waf_bld=waf_bld)
current_file_record.process_da_line_info(info)
return coverage_results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root_to_strip', type=str, help='Root to strip from front of file paths')
parser.add_argument('lcov_info_file', type=str, help='Path to lcov.info file')
args = parser.parse_args()
print(parse_lcov_info_for_arcanist(args.lcov_info_file, root_to_strip=args.root_to_strip))

waftools/ldscript.py Normal file (+42)

@@ -0,0 +1,42 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from waflib import Utils, Errors, Node
from waflib.TaskGen import after, feature
@after('apply_link')
@feature('cprogram', 'cshlib')
def process_ldscript(self):
if not getattr(self, 'ldscript', None) or self.env.CC_NAME != 'gcc':
return
def convert_to_node(node_or_path_str):
if isinstance(node_or_path_str, basestring):
return self.path.make_node(node_or_path_str)
else:
return node_or_path_str
if isinstance(self.ldscript, basestring) or \
isinstance(self.ldscript, list):
ldscripts = Utils.to_list(self.ldscript)
else: # Assume Node
ldscripts = [self.ldscript]
nodes = [convert_to_node(node) for node in ldscripts]
for node in nodes:
if not node:
raise Errors.WafError('could not find %r' % self.ldscript)
self.link_task.env.append_value('LINKFLAGS', '-T%s' % node.abspath())
self.link_task.dep_nodes.append(node)
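A sketch of attaching a script to a link step (script name illustrative); the -T flag and the rebuild dependency come from the hook above:

def build(bld):
    bld.program(source='main.c', target='fw.elf', ldscript='stm32f4_fw.ld')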

waftools/objcopy.py Normal file (+41)

@@ -0,0 +1,41 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# FIXME: For some reason this doesn't work with multiple rules with the same input extension.
#from waflib import TaskGen
#TaskGen.declare_chain(name='hex', rule='${OBJCOPY} -O ihex ${SRC} ${TGT}', ext_in='.elf', ext_out='.hex')
#TaskGen.declare_chain(name='bin', rule='${OBJCOPY} -O binary ${SRC} ${TGT}', ext_in='.elf', ext_out='.bin')
def objcopy(task, mode, extra_args=None):
cmd = 'arm-none-eabi-objcopy -S -R .stack -R .priv_bss -R .bss '
if hasattr(task.generator, 'extra_args'):
cmd += '%s ' % (task.generator.extra_args)
if extra_args is not None:
cmd += '%s ' % (extra_args)
cmd += '-O %s "%s" "%s"' % (mode, task.inputs[0].abspath(), task.outputs[0].abspath())
return task.exec_command(cmd)
def objcopy_fill_bss(task, mode):
return task.exec_command('arm-none-eabi-objcopy -O %s -j .text -j .data '
'-j .bss --set-section-flags .bss=alloc,load,contents "%s" "%s"' %
(mode, task.inputs[0].abspath(), task.outputs[0].abspath()))
def objcopy_hex(task):
return objcopy(task, 'ihex')
def objcopy_bin(task):
return objcopy(task, 'binary')
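Typical wiring for the two wrappers (file names illustrative):

def build(bld):
    bld(rule=objcopy_hex, source='fw.elf', target='fw.hex')
    bld(rule=objcopy_bin, source='fw.elf', target='fw.bin')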

waftools/openocd.py Normal file (+210)

@@ -0,0 +1,210 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import pexpect
import re
import string
import subprocess
import sys
import waflib
from waflib import Logs
JTAG_OPTIONS = {'olimex': 'source [find interface/ftdi/olimex-arm-usb-ocd-h.cfg]',
'fixture': 'source [find interface/flossjtag-noeeprom.cfg]',
'bb2': 'source waftools/openocd_bb2_ftdi.cfg',
'bb2-legacy': 'source waftools/openocd_bb2_ft2232.cfg',
'jtag_ftdi': 'source waftools/openocd_jtag_ftdi.cfg',
'swd_ftdi': 'source waftools/openocd_swd_ftdi.cfg',
'swd_jlink': 'source waftools/openocd_swd_jlink.cfg',
'swd_stlink': 'source [find interface/stlink-v2.cfg]',
}
OPENOCD_TELNET_PORT = 4444
@contextlib.contextmanager
def daemon(ctx, cfg_file, use_swd=False):
if _is_openocd_running():
yield
else:
if use_swd:
expect_str = "SWD IDCODE"
else:
expect_str = "device found"
proc = pexpect.spawn('openocd', ['-f', cfg_file], logfile=sys.stdout)
# Wait for OpenOCD to connect to the board:
result = proc.expect([expect_str, pexpect.TIMEOUT], timeout=10)
if result == 0:
yield
else:
raise Exception("Timed out connecting OpenOCD to development board...")
proc.close()
def _has_openocd(ctx):
try:
ctx.cmd_and_log(['which', 'openocd'], quiet=waflib.Context.BOTH)
return True
except:
return False
def _is_openocd_running():
import socket
import errno
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', OPENOCD_TELNET_PORT))
s.close()
except socket.error as e:
s.close()
return e[0] == errno.EADDRINUSE
return False
def run_command(ctx, cmd, ignore_fail=False, expect=[], timeout=40,
shutdown=True, enforce_expect=False, cfg_file="openocd.cfg"):
if _is_openocd_running():
import telnetlib
t = telnetlib.Telnet('', OPENOCD_TELNET_PORT)
Logs.info("Sending commands to OpenOCD daemon:\n%s\n..." % cmd)
t.write("%s\n" % cmd)
for regex in expect:
idx, match, text = t.expect([regex], timeout)
if enforce_expect and idx == -1:
# They'll see the full story in another window
ctx.fatal("OpenOCD expectation '%s' unfulfilled" % regex)
t.close()
else:
fail_handling = ' || true ' if ignore_fail else ''
if shutdown:
# append 'shutdown' to make openocd exit:
cmd = "%s ; shutdown" % cmd
ctx.exec_command('openocd -f %s -c "%s" 2>&1 | tee .waf.openocd.log %s' %
(cfg_file, cmd, fail_handling), stdout=None, stderr=None)
if enforce_expect:
# Read the result
with open(".waf.openocd.log", "r") as result_file:
result = result_file.read()
match_start = 0
for regex in expect:
expect_match = re.search(regex, result[match_start:])
if not expect_match:
ctx.fatal("OpenOCD expectation '%s' unfulfilled" % regex)
match_start = expect_match.end()
def _get_supported_interfaces(ctx):
if not _has_openocd(ctx):
return []
# Ugh, openocd exits with status 1 when not specifying an interface...
try:
ctx.cmd_and_log(['openocd', '-c', '"interface_list"'],
quiet=waflib.Context.BOTH,
output=waflib.Context.STDERR)
except Exception as e:
# Ugh, openocd prints the output to stderr...
out = e.stderr
out_lines = out.splitlines()
interfaces = []
for line in out_lines:
matches = re.search("\d+: (\w+)", line)
if matches:
interfaces.append(matches.groups()[0])
return interfaces
def get_flavor(conf):
""" Returns a 2-tuple (is_newer_than_0_7_0, is_pebble_flavor) """
try:
version_string = conf.cmd_and_log(['openocd', '--version'],
quiet=waflib.Context.BOTH,
output=waflib.Context.STDERR)
version_string = version_string.splitlines()[0]
matches = re.search("(\d+)\.(\d+)\.(\d+)", version_string)
version = map(int, matches.groups())
return (version[0] > 0 or version[1] >= 7,
'pebble' in version_string)
except Exception:
Logs.error("Couldn't parse openocd version")
return (False, False)
def _get_reset_conf(conf, is_newer_than_0_7_0, should_connect_assert_srst):
if is_newer_than_0_7_0:
options = ['trst_and_srst', 'srst_nogate']
if should_connect_assert_srst:
options.append('connect_assert_srst')
return ' '.join(options)
else:
return 'trst_and_srst'
def write_cfg(conf):
jtag = conf.env.JTAG
if jtag == 'bb2':
if 'ftdi' not in _get_supported_interfaces(conf):
jtag = 'bb2-legacy'
Logs.warn('OpenOCD is not compiled with --enable-ftdi, falling'
' back to legacy ft2232 driver.')
if conf.env.MICRO_FAMILY == 'STM32F2':
target = 'stm32f2x.cfg'
elif conf.env.MICRO_FAMILY == 'STM32F4':
target = 'stm32f4x.cfg'
elif conf.env.MICRO_FAMILY == 'STM32F7':
target = 'stm32f7x.cfg'
(is_newer_than_0_7_0, is_pebble_flavor) = get_flavor(conf)
reset_config = _get_reset_conf(conf, is_newer_than_0_7_0, False)
Logs.info("reset_config: %s" % reset_config)
if is_pebble_flavor:
Logs.info("openocd is Pebble flavored!")
os_name = 'Pebble_FreeRTOS'
else:
os_name = 'FreeRTOS'
openocd_cfg = OPENOCD_CFG_TEMPLATE.substitute(jtag=JTAG_OPTIONS[jtag],
target=target,
reset_config=reset_config,
os_name=os_name)
waflib.Utils.writef('./openocd.cfg', openocd_cfg)
OPENOCD_CFG_TEMPLATE = string.Template("""
# THIS IS A GENERATED FILE: See waftools/openocd.py for details
${jtag}
source [find target/${target}]
reset_config ${reset_config}
$$_TARGETNAME configure -rtos ${os_name}
$$_TARGETNAME configure -event gdb-attach {
echo "Halting target because GDB is attaching..."
halt
}
$$_TARGETNAME configure -event gdb-detach {
echo "Resuming target because GDB is detaching..."
resume
}
""")

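A sketch of driving run_command from a waf command (the command string and expected output are illustrative):

def halt(ctx):
    run_command(ctx, 'reset halt', expect=['halted due to debug-request'],
                enforce_expect=True)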

@@ -0,0 +1,6 @@
# LEGACY ALERT! The ft2232 OpenOCD driver is deprecated.
interface ft2232
ft2232_vid_pid 0x0403 0x6010
ft2232_device_desc "Dual RS232-HS"
ft2232_layout "usbjtag"
ft2232_latency 2


@@ -0,0 +1,11 @@
interface ftdi
ftdi_device_desc "Dual RS232-HS"
ftdi_vid_pid 0x0403 0x6010
# output value, direction (1 for output, 0 for input)
ftdi_layout_init 0x1848 0x185b
ftdi_layout_signal nTRST -data 0x0010 -oe 0x0010
ftdi_layout_signal nSRST -data 0x0040 -oe 0x0040
# Red + Green LED (inverted output: low is on)
ftdi_layout_signal LED -ndata 0x1800 -oe 0x1800


@@ -0,0 +1,15 @@
interface ftdi
ftdi_device_desc "Quad RS232-HS"
ftdi_vid_pid 0x0403 0x6011
# output value, direction (1 for output, 0 for input)
ftdi_layout_init 0x1848 0x185b
ftdi_layout_signal nTRST -data 0x0010 -oe 0x0010
ftdi_layout_signal nSRST -data 0x0040 -oe 0x0040
# Red + Green LED (inverted output: low is on)
# TX LED (GREEN)
ftdi_layout_signal LED -ndata 0x0080 -oe 0x0080
# RX LED (RED)
ftdi_layout_signal LED2 -ndata 0x0020 -oe 0x0020


@@ -0,0 +1,24 @@
interface ftdi
ftdi_vid_pid 0x0403 0x6011 0x0403 0x7893 0x0403 0x7894
# output value, direction (1 for output, 0 for input)
ftdi_layout_init 0x1848 0x185b
ftdi_layout_signal SWD_EN -data 0 -oe 0
ftdi_layout_signal SWDIO_OE -data 0 -oe 0
ftdi_layout_signal nSRST -data 0x0040 -oe 0x0040
# Red + Green LED (inverted output: low is on)
# TX LED (GREEN)
ftdi_layout_signal LED -ndata 0x0080 -oe 0x0080
# RX LED (RED)
ftdi_layout_signal LED2 -ndata 0x0020 -oe 0x0020
transport select swd
# Note: This works around an issue where reset_config does not actually toggle this line. It would be
# nice to figure out what the actual issue with 'reset_config' is at some point
proc init_reset { mode } {
ftdi_set_signal nSRST 0
sleep 100
ftdi_set_signal nSRST z
}


@@ -0,0 +1,2 @@
interface jlink
transport select swd

waftools/pebble_arm_gcc.py Normal file (+276)

@@ -0,0 +1,276 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import waflib
from waflib import Utils
from waflib.Configure import conf
def find_clang_path(conf):
""" Find the first clang on our path with a version greater than 3.2"""
out = conf.cmd_and_log('which -a clang')
paths = out.splitlines()
for path in paths:
# Make sure clang is at least version 3.3
out = conf.cmd_and_log('%s --version' % path)
r = re.findall(r'clang version (\d+)\.(\d+)', out)
if len(r):
version_major = int(r[0][0])
version_minor = int(r[0][1])
if version_major > 3 or (version_major == 3 and version_minor >= 3):
return path
conf.fatal('No version of clang 3.3+ found on your path!')
def find_toolchain_path(conf):
possible_paths = ['~/arm-cs-tools/arm-none-eabi',
'/usr/local/Cellar/arm-none-eabi-gcc/arm/arm-none-eabi']
for p in possible_paths:
p = os.path.expanduser(p) # '~' must be expanded by hand for isdir to work
if os.path.isdir(p):
return p
conf.fatal('could not find arm-none-eabi folder')
def find_sysroot_path(conf):
""" The sysroot is a directory structure, laid out like /usr, that includes custom
headers and libraries for a target. We want to use the headers from our mentor
toolchain, but they don't have a structure like /usr/bin. Therefore, if this is
first time configuring on a system, create the directory structure with the
appropriate symlinks so things work out.
Note that this is a bit of a hack. Ideally our toolchain setup script will produce
the directory structure that we expect, but I'm not going to muck with the toolchain
too much until I get everything working end to end as opposed to having to rebuild
our toolchain all the time. """
toolchain_path = find_toolchain_path(conf)
sysroot_path = os.path.join(toolchain_path, 'sysroot')
if not os.path.isdir(sysroot_path):
waflib.Logs.pprint('CYAN', 'Sysroot dir not found at %s, creating...' % sysroot_path)
os.makedirs(os.path.join(sysroot_path, 'usr/local/'))
os.symlink(os.path.join(toolchain_path, 'include/'),
os.path.join(sysroot_path, 'usr/local/include'))
return sysroot_path
@conf
def using_clang_compiler(ctx):
compiler_name = ctx.env.CC
if isinstance(ctx.env.CC, list):
compiler_name = ctx.env.CC[0]
if 'CCC_CC' in os.environ:
compiler_name = os.environ['CCC_CC']
return 'clang' in compiler_name
def options(opt):
opt.add_option('--relax_toolchain_restrictions', action='store_true',
help='Allow us to compile with a non-standard toolchain')
opt.add_option('--use_clang', action='store_true',
help='(EXPERIMENTAL) Uses clang instead of gcc as our compiler')
opt.add_option('--use_env_cc', action='store_true',
help='Use whatever CC is in the environment as our compiler')
opt.add_option('--beta', action='store_true',
help='Build in beta mode '
'(--beta and --release are mutually exclusive)')
opt.add_option('--release', action='store_true',
help='Build in release mode'
' (--beta and --release are mutually exclusive)')
opt.add_option('--fat_firmware', action='store_true',
help='build in GDB mode WITH logs; requires 1M of onboard flash')
opt.add_option('--gdb', action='store_true',
help='build in GDB mode (no optimization, no logs)')
opt.add_option('--lto', action='store_true', help='Enable link-time optimization')
opt.add_option('--no-lto', action='store_true', help='Disable link-time optimization')
opt.add_option('--save_temps', action='store_true',
help='Save *.i and *.s files during compilation')
opt.add_option('--no_debug', action='store_true',
help='Remove -g debug information. See --save_temps')
def configure(conf):
CROSS_COMPILE_PREFIX = 'arm-none-eabi-'
conf.env.AS = CROSS_COMPILE_PREFIX + 'gcc'
conf.env.AR = CROSS_COMPILE_PREFIX + 'gcc-ar'
if conf.options.use_env_cc:
pass # Don't touch conf.env.CC
elif conf.options.use_clang:
conf.env.CC = find_clang_path(conf)
else:
conf.env.CC = CROSS_COMPILE_PREFIX + 'gcc'
conf.env.LINK_CC = conf.env.CC
conf.load('gcc')
conf.env.append_value('CFLAGS', [ '-std=c11', ])
c_warnings = [ '-Wall',
'-Wextra',
'-Werror',
'-Wpointer-arith',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
'-Wno-error=unused-function',
'-Wno-error=unused-variable',
'-Wno-error=unused-parameter' ]
if conf.using_clang_compiler():
sysroot_path = find_sysroot_path(conf)
# Disable clang warnings from now... they don't quite match
c_warnings = []
conf.env.append_value('CFLAGS', [ '-target', 'arm-none-eabi' ])
conf.env.append_value('CFLAGS', [ '--sysroot', sysroot_path ])
# Clang doesn't enable short-enums by default since
# arm-none-eabi is an unsupported target
conf.env.append_value('CFLAGS', '-fshort-enums')
arm_toolchain_path = find_toolchain_path(conf)
conf.env.append_value('CFLAGS', [ '-B' + arm_toolchain_path ])
conf.env.append_value('LINKFLAGS', [ '-target', 'arm-none-eabi' ])
conf.env.append_value('LINKFLAGS', [ '--sysroot', sysroot_path ])
else:
# These warnings only exist in GCC
c_warnings.append('-Wno-error=unused-but-set-variable')
c_warnings.append('-Wno-packed-bitfield-compat')
if not ('4', '8') <= conf.env.CC_VERSION <= ('4', '9', '3'):
# Verify the toolchain we're using is allowed. This is to prevent us from accidentally
# building and releasing firmwares that are built in ways we haven't tested.
if not conf.options.relax_toolchain_restrictions:
TOOLCHAIN_ERROR_MSG = \
"""=== INVALID TOOLCHAIN ===
Either upgrade your toolchain using the process listed here:
https://pebbletechnology.atlassian.net/wiki/display/DEV/Firmware+Toolchain
Or re-configure with the --relax_toolchain_restrictions option. """
conf.fatal('Invalid toolchain detected!\n' + \
repr(conf.env.CC_VERSION) + '\n' + \
TOOLCHAIN_ERROR_MSG)
conf.env.CFLAGS.append('-I' + conf.path.abspath() + '/src/fw/util/time')
conf.env.append_value('CFLAGS', c_warnings)
conf.add_platform_defines(conf.env)
conf.env.ASFLAGS = [ '-xassembler-with-cpp', '-c' ]
conf.env.AS_TGT_F = '-o'
conf.env.append_value('LINKFLAGS', [ '-Wl,--warn-common' ])
args = [ '-fvar-tracking-assignments', # Track variable locations better
'-mthumb',
'-ffreestanding',
'-ffunction-sections',
'-fbuiltin',
'-fno-builtin-itoa' ]
if not conf.options.no_debug:
args += [ '-g3', # Extra debugging info, including macro definitions
'-gdwarf-4' ] # More detailed debug info
if conf.options.save_temps:
args += [ '-save-temps=obj' ]
if conf.options.lto:
args += [ '-flto' ]
if not using_clang_compiler(conf):
# None of these options are supported by clang
args += [ '-flto-partition=balanced',
'--param','lto-partitions=128', # Can be trimmed down later
'-fuse-linker-plugin',
'-fno-if-conversion',
'-fno-caller-saves',
'-fira-region=mixed',
'-finline-functions',
'-fconserve-stack',
'--param','inline-unit-growth=1',
'--param','max-inline-insns-auto=1',
'--param','max-cse-path-length=1000',
'--param','max-grow-copy-bb-insns=1',
'-fno-hoist-adjacent-loads',
'-fno-optimize-sibling-calls',
'-fno-schedule-insns2' ]
cpu_fpu = None
if conf.env.MICRO_FAMILY == "STM32F2":
args += [ '-mcpu=cortex-m3' ]
elif conf.env.MICRO_FAMILY == "STM32F4":
args += [ '-mcpu=cortex-m4']
cpu_fpu = "fpv4-sp-d16"
elif conf.env.MICRO_FAMILY == "STM32F7":
args += [ '-mcpu=cortex-m7']
cpu_fpu = "fpv5-d16"
# QEMU does not have FPU
if conf.env.QEMU:
cpu_fpu = None
if cpu_fpu:
args += [ "-mfloat-abi=softfp",
"-mfpu="+cpu_fpu ]
else:
# Not using float-abi=softfp means no FPU instructions.
# It also defines __SOFTFP__=1
# Yes that define name is super misleading, but what can you do.
pass
conf.env.append_value('CFLAGS', args)
conf.env.append_value('ASFLAGS', args)
conf.env.append_value('LINKFLAGS', args)
conf.env.SHLIB_MARKER = None
conf.env.STLIB_MARKER = None
# Set whether or not we show the "Your Pebble just reset..." alert
if conf.options.release and conf.options.beta:
raise RuntimeError("--beta and --release are mutually exclusive and cannot be used together")
if not conf.options.release:
conf.env.append_value('DEFINES', [ 'SHOW_PEBBLE_JUST_RESET_ALERT' ])
conf.env.append_value('DEFINES', [ 'SHOW_BAD_BT_STATE_ALERT' ])
if not conf.is_bigboard():
conf.env.append_value('DEFINES', [ 'SHOW_ACTIVITY_DEMO' ])
# Set optimization level
if conf.options.beta:
optimize_flags = '-Os'
print "Beta mode"
elif conf.options.release:
optimize_flags = '-Os'
print "Release mode"
elif conf.options.fat_firmware:
optimize_flags = '-O0'
conf.env.IS_FAT_FIRMWARE = True
print 'Building Fat Firmware (no optimizations, logging enabled)'
elif conf.options.gdb:
optimize_flags = '-Og'
print "GDB mode"
else:
optimize_flags = '-Os'
print 'Debug mode'
conf.env.append_value('CFLAGS', optimize_flags)
conf.env.append_value('LINKFLAGS', optimize_flags)

111
waftools/pebble_sdk_gcc.py Normal file
View file

@ -0,0 +1,111 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from waflib.Errors import BuildError
import inject_metadata
def configure(conf):
"""
This method is called from the configure method of the pebble_sdk waftool to setup the
environment variables for compiling a 3rd party app
:param conf: the ConfigContext
:return: None
"""
CROSS_COMPILE_PREFIX = 'arm-none-eabi-'
conf.env.AS = CROSS_COMPILE_PREFIX + 'gcc'
conf.env.AR = CROSS_COMPILE_PREFIX + 'ar'
conf.env.CC = CROSS_COMPILE_PREFIX + 'gcc'
conf.env.LD = CROSS_COMPILE_PREFIX + 'ld'
conf.env.SIZE = CROSS_COMPILE_PREFIX + 'size'
optimize_flag = '-Os'
conf.load('gcc')
pebble_cflags = ['-std=c99',
'-mcpu=cortex-m3',
'-mthumb',
'-ffunction-sections',
'-fdata-sections',
'-g',
'-fPIE',
optimize_flag]
c_warnings = ['-Wall',
'-Wextra',
'-Werror',
'-Wno-unused-parameter',
'-Wno-error=unused-function',
'-Wno-error=unused-variable']
if (conf.env.SDK_VERSION_MAJOR == 5) and (conf.env.SDK_VERSION_MINOR > 19):
pebble_cflags.append('-D_TIME_H_')
pebble_cflags.extend(c_warnings)
pebble_linkflags = ['-mcpu=cortex-m3',
'-mthumb',
'-Wl,--gc-sections',
'-Wl,--warn-common',
'-fPIE',
optimize_flag]
conf.env.prepend_value('CFLAGS', pebble_cflags)
conf.env.prepend_value('LINKFLAGS', pebble_linkflags)
conf.env.SHLIB_MARKER = None
conf.env.STLIB_MARKER = None
# -----------------------------------------------------------------------------------
def gen_inject_metadata_rule(bld, src_bin_file, dst_bin_file, elf_file, resource_file, timestamp,
has_pkjs, has_worker):
"""
Copy from src_bin_file to dst_bin_file and inject the correct meta-data into the
header of dst_bin_file
:param bld: the BuildContext
:param src_bin_file: the path to the pebble-app.raw.bin file
:param dst_bin_file: the path to the pebble-app.bin
:param elf_file: the path to the pebble-app.elf file
:param resource_file: the path to the resource pack
:param timestamp: the timestamp of the project build
:param has_pkjs: boolean for whether the project contains code using PebbleKit JS
:param has_worker: boolean for whether the project has a worker binary
"""
def inject_data_rule(task):
bin_path = task.inputs[0].abspath()
elf_path = task.inputs[1].abspath()
if len(task.inputs) >= 3:
res_path = task.inputs[2].abspath()
else:
res_path = None
tgt_path = task.outputs[0].abspath()
# First copy the raw bin that the compiler produced to a new location. This way we'll have
# the raw binary around to inspect just in case anything went wrong while we were injecting
# metadata.
cp_result = task.exec_command('cp "{}" "{}"'.format(bin_path, tgt_path))
if cp_result != 0:
raise BuildError("Failed to copy %s to %s!" % (bin_path, tgt_path))
# Now actually inject the metadata into the new copy of the binary.
inject_metadata.inject_metadata(tgt_path, elf_path, res_path, timestamp,
allow_js=has_pkjs, has_worker=has_worker)
sources = [src_bin_file, elf_file]
if resource_file is not None:
sources.append(resource_file)
bld(rule=inject_data_rule, name='inject-metadata', source=sources, target=dst_bin_file)
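# Example invocation from a wscript build() function (a sketch; the file names
# and timestamp are illustrative, not taken from a real project):
# gen_inject_metadata_rule(bld, 'pebble-app.raw.bin', 'pebble-app.bin',
#                          'pebble-app.elf', 'app_resources.pbpack',
#                          timestamp=1452297600, has_pkjs=False, has_worker=False)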

View file

@ -0,0 +1,24 @@
#!/usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def set_env_sdk_version(self, process_info_node):
with open(process_info_node.abspath(), 'r') as f:
for line in f:
if "PROCESS_INFO_CURRENT_SDK_VERSION_MAJOR" in line:
self.env.SDK_VERSION_MAJOR = int(line.split(' ')[2].rstrip(), 16)
if "PROCESS_INFO_CURRENT_SDK_VERSION_MINOR" in line:
self.env.SDK_VERSION_MINOR = int(line.split(' ')[2].rstrip(), 16)
return
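# The parser above assumes lines of the following shape in the process info
# header (illustrative; note the values are parsed as hex):
# #define PROCESS_INFO_CURRENT_SDK_VERSION_MAJOR 0x5
# #define PROCESS_INFO_CURRENT_SDK_VERSION_MINOR 0x2a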

444
waftools/pebble_test.py Normal file
View file

@ -0,0 +1,444 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from waflib.TaskGen import before, after, feature, taskgen_method
from waflib import Errors, Logs, Options, Task, Utils, Node
from waftools import junit_xml
from string import Template
import hashlib
import json
import lcov_info_parser
import os
import re
import unicodedata as ud
@feature('pebble_test')
@after('apply_link')
def make_test(self):
if not 'cprogram' in self.features and not 'cxxprogram' in self.features:
Logs.error('test cannot be executed %s'%self)
return
if getattr(self, 'link_task', None):
sources = [self.link_task.outputs[0]]
task = self.create_task('run_test', sources)
runtime_deps = getattr(self.link_task.generator, 'runtime_deps', None)
if runtime_deps is not None:
task.dep_nodes = runtime_deps
# Lock to prevent concurrent modifications of the utest_results list. We may
# have multiple tests running and finishing at the same time.
import threading
testlock = threading.Lock()
class run_test(Task.Task):
color = 'PINK'
def runnable_status(self):
if self.generator.bld.options.no_run:
return Task.SKIP_ME
ret = super(run_test, self).runnable_status()
if ret==Task.SKIP_ME:
# FIXME: We probably don't need to rerun tests if the inputs don't change, but meh, whatever.
return Task.RUN_ME
return ret
def run_test(self, test_runme_node, cwd):
# Execute the test normally:
try:
timer = Utils.Timer()
filename = test_runme_node.abspath()
args = [filename]
if filename.endswith('.js'):
args.insert(0, 'node')
if self.generator.bld.options.test_name:
args.append("-t%s" % (self.generator.bld.options.test_name))
if self.generator.bld.options.list_tests:
self.generator.bld.options.show_output = True
args.append("-l")
proc = Utils.subprocess.Popen(args, cwd=cwd, stderr=Utils.subprocess.PIPE,
stdout=Utils.subprocess.PIPE)
(stdout, stderr) = proc.communicate()
except OSError:
Logs.pprint('RED', 'Failed to run test: %s' % filename)
return
if self.generator.bld.options.show_output:
print stdout
print stderr
tup = (test_runme_node, proc.returncode, stdout, stderr, str(timer))
self.generator.utest_result = tup
testlock.acquire()
try:
bld = self.generator.bld
Logs.debug("ut: %r", tup)
try:
bld.utest_results.append(tup)
except AttributeError:
bld.utest_results = [tup]
a = getattr(self.generator.bld, 'added_post_fun', False)
if not a:
self.generator.bld.add_post_fun(summary)
self.generator.bld.added_post_fun = True
finally:
testlock.release()
def run(self):
test_runme_node = self.inputs[0]
cwd = self.inputs[0].parent.abspath()
if self.generator.bld.options.debug_test:
# Only debug the first test encountered. In case the -M option was
# omitted or a lot of tests were matched, it would otherwise result
# in repeatedly launching the debugger... poor dev xp :)
is_added = getattr(self.generator.bld, 'added_debug_fun', False)
if not is_added:
# Create a post-build closure to execute:
test_filename_abspath = test_runme_node.abspath()
if test_filename_abspath.endswith('.js'):
fmt = 'node-debug {ARGS}'
cmd = fmt.format(ARGS=test_filename_abspath)
else:
build_dir = self.generator.bld.bldnode.abspath()
fmt = 'gdb --cd={CWD} --directory={BLD_DIR} --args {ARGS}'
cmd = fmt.format(CWD=cwd, BLD_DIR=build_dir,
ARGS=test_filename_abspath)
def debug_test(bld):
# Execute the test within gdb for debugging:
os.system(cmd)
self.generator.bld.add_post_fun(debug_test)
self.generator.bld.added_debug_fun = True
else:
Logs.pprint('RED', 'More than one test was selected! '
'Debugging only the first one encountered...')
else:
self.run_test(test_runme_node, cwd)
def summary(bld):
lst = getattr(bld, 'utest_results', [])
if not lst: return
# Write a jUnit xml report for further processing by Jenkins:
test_suites = []
for (node, code, stdout, stderr, duration) in lst:
# FIXME: We don't get a status per test, only at the suite level...
# Perhaps clar itself should do the reporting?
def strip_non_ascii(s):
return "".join(i for i in s if ord(i) < 128)
test_case = junit_xml.TestCase('all')
if code:
# Include stdout and stderr if test failed:
test_case.stdout = strip_non_ascii(stdout)
test_case.stderr = strip_non_ascii(stderr)
test_case.add_failure_info(message='failed')
suite_name = node.parent.relpath()
test_suite = junit_xml.TestSuite(suite_name, [test_case])
test_suites.append(test_suite)
report_xml_string = junit_xml.TestSuite.to_xml_string(test_suites)
bld.bldnode.make_node('junit.xml').write(report_xml_string)
total = len(lst)
fail = len([x for x in lst if x[1]])
Logs.pprint('CYAN', 'test summary')
Logs.pprint('CYAN', ' tests that pass %d/%d' % (total-fail, total))
for (node, code, out, err, duration) in lst:
if not code:
Logs.pprint('GREEN', ' %s' % node.abspath())
if fail > 0:
Logs.pprint('RED', ' tests that fail %d/%d' % (fail, total))
for (node, code, out, err, duration) in lst:
if code:
Logs.pprint('RED', ' %s' % node.abspath())
# FIXME: Make UTF-8 print properly, see PBL-29528
print(ud.normalize('NFKD', out.decode('utf-8')).encode('ascii', 'ignore'))
print(ud.normalize('NFKD', err.decode('utf-8')).encode('ascii', 'ignore'))
raise Errors.WafError('test failed')
@taskgen_method
@feature("test_product_source")
def test_product_source_hook(self):
""" This function is a "task generator". It's going to generate one or more tasks to actually
build our objects.
"""
# Create a "c" task with the given inputs and outputs. This will use the class named "c"
# defined in the waflib/Tools/c.py file provided by waf.
self.create_task('c', self.product_src, self.product_out)
def build_product_source_files(bld, test_dir, include_paths, defines, cflags, product_sources):
""" Build the "product sources", which are the parts of our code base that are under test
as well as any fakes we need to link against as well.
Return a list of the compiled object nodes that we should later link against.
This function attempts to share object files with other tests that use the same product
sources and with the same compilation configuration. We can't always reuse objects
because two tests might use different defines or include paths, but where we can we do.
"""
top_dir = bld.root.find_dir(bld.top_dir)
# Hash the configuration information. Some lists are order dependent, some aren't. When they're not
# order dependent sort them so we have a higher likelihood of colliding and finding an existing
# object file for this.
h = hashlib.md5()
h.update(Utils.h_list(include_paths))
h.update(Utils.h_list(sorted(defines)))
h.update(Utils.h_list(sorted(cflags)))
compile_args_hash_str = h.hexdigest()
if not hasattr(bld, 'utest_product_sources'):
bld.utest_product_sources = set()
product_objects = []
for s in product_sources:
# Make sure everything in the list is a node
if isinstance(s, basestring):
src_node = bld.path.find_node(s)
else:
src_node = s
rel_path = src_node.path_from(top_dir)
bld_args_dir = top_dir.get_bld().find_or_declare(compile_args_hash_str)
out_node = bld_args_dir.find_or_declare(rel_path).change_ext('.o')
product_objects.append(out_node)
if out_node not in bld.utest_product_sources:
# If we got here that means that we haven't built this product source yet. Build it now.
bld.utest_product_sources.add(out_node)
bld(features="test_product_source c",
product_src=src_node,
product_out=out_node,
includes=include_paths,
cflags=cflags,
defines=defines)
return product_objects
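# The sharing works by keying object files on a hash of the compile settings,
# e.g. (layout illustrative):
#   build/<md5 of includes+defines+cflags>/src/fw/util/math.o
# Two tests with identical settings reuse the same object file.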
def get_bitdepth_for_platform(bld, platform):
if platform in ('snowy', 'spalding', 'robert'):
return 8
elif platform in ('tintin', 'silk'):
return 1
else:
bld.fatal('Unknown platform {}'.format(platform))
def add_clar_test(bld, test_name, test_source, sources_ant_glob, product_sources, test_libs,
override_includes, add_includes, defines, runtime_deps, platform, use):
if not bld.options.regex and bld.variant == 'test_rocky_emx':
# Include tests starting with test_rocky... only!
bld.options.regex = 'test_rocky'
if (bld.options.regex):
filename = str(test_source).strip()
if not re.match(bld.options.regex, filename):
return
platform_set = set(['default', 'tintin', 'snowy', 'spalding', 'silk', 'robert'])
#validate platforms specified
if platform not in platform_set:
raise ValueError("Invalid platform {} specified, valid platforms are {}".format(
platform, ', '.join(platform_set)))
platform_product_sources = list(product_sources)
platform = platform.lower()
platform_defines = []
if platform == 'default':
test_dir = bld.path.get_bld().make_node(test_name)
node_name = 'runme'
if bld.variant == 'test_rocky_emx':
node_name += '.js'
test_bin = test_dir.make_node(node_name)
platform = 'snowy'
# add a default platform define so file selection can use non-platform pbi/png files
platform_defines.append('PLATFORM_DEFAULT=1')
else:
test_dir = bld.path.get_bld().make_node(test_name + '_' + platform)
test_bin = test_dir.make_node('runme_' + platform)
platform_defines.append('PLATFORM_DEFAULT=0')
if platform == 'silk' or platform == 'robert':
platform_defines.append('CAPABILITY_HAS_PUTBYTES_PREACKING=1')
def _generate_clar_harness(task):
bld = task.generator.bld
clar_dir = task.generator.env.CLAR_DIR
test_src_file = task.inputs[0].abspath()
test_bld_dir = task.outputs[0].get_bld().parent.abspath()
cmd = 'python {0}/clar.py --file={1} --clar-path={0} {2}'.format(clar_dir, test_src_file, test_bld_dir)
task.generator.bld.exec_command(cmd)
clar_harness = test_dir.make_node('clar_main.c')
# Should make this a general task like the objcopy ones.
bld(name='generate_clar_harness',
rule=_generate_clar_harness,
source=test_source,
target=[clar_harness, test_dir.make_node('clar.h')])
src_includes = [ "tests/overrides/default",
"tests/stubs",
"tests/fakes",
"tests/test_includes",
"tests",
"src/include",
"src/core",
"src/fw",
"src/libbtutil/include",
"src/libos/include",
"src/libutil/includes",
"src/boot",
"src/fw/applib/vendor/tinflate",
"src/fw/applib/vendor/uPNG",
"src/fw/vendor/jerryscript/jerry-core",
"src/fw/vendor/jerryscript/jerry-core/jcontext",
"src/fw/vendor/jerryscript/jerry-core/jmem",
"src/fw/vendor/jerryscript/jerry-core/jrt",
"src/fw/vendor/jerryscript/jerry-core/lit",
"src/fw/vendor/jerryscript/jerry-core/vm",
"src/fw/vendor/jerryscript/jerry-core/ecma/builtin-objects",
"src/fw/vendor/jerryscript/jerry-core/ecma/base",
"src/fw/vendor/jerryscript/jerry-core/ecma/operations",
"src/fw/vendor/jerryscript/jerry-core/parser/js",
"src/fw/vendor/jerryscript/jerry-core/parser/regexp",
"src/fw/vendor/FreeRTOS/Source/include",
"src/fw/vendor/FreeRTOS/Source/portable/GCC/ARM_CM3_PEBBLE",
"src/fw/vendor/nanopb" ]
# Use Snowy's resource headers as a fallback if we don't override it here
resource_override_dir_name = platform if platform in ('silk', 'robert') else 'snowy'
src_includes.append("tests/overrides/default/resources/{}".format(resource_override_dir_name))
override_includes = ['tests/overrides/' + f for f in override_includes]
src_includes = override_includes + src_includes
if add_includes is not None:
src_includes.extend(add_includes)
src_includes = [os.path.join(bld.srcnode.abspath(), f) for f in src_includes]
includes = src_includes
# Add the generated IDL headers
root_build_dir = bld.path.get_bld().abspath().replace(bld.path.relpath(), '')
idl_includes = [root_build_dir + 'src/idl']
includes += idl_includes
if use is None:
use = []
# Add DUMA for memory corruption checking
# conditionally disable duma based on DUMA_DISABLED being defined
# DUMA is found in tests/vendor/duma
use += ['libutil', 'libutil_includes', 'libos_includes', 'libbtutil', 'libbtutil_includes']
if 'DUMA_DISABLED' not in defines and 'DUMA_DISABLED' not in bld.env.DEFINES:
use.append('duma')
test_libs.append('pthread') # DUMA depends on pthreads
test_libs.append('m') # Add libm math.h functions
# Pull in display.h and display_<platform>.h. We force-include these per
# platform so platform-specific code using ifdefs is triggered correctly
# without reconfiguring/rebuilding all unit tests per platform.
board_path = bld.srcnode.find_node('src/fw/board').abspath()
util_path = bld.srcnode.find_node('src/fw/util').abspath()
bitdepth = get_bitdepth_for_platform(bld, platform)
cflags_force_include = ['-Wno-unused-command-line-argument']
cflags_force_include.append('-include' + board_path + '/displays/display_' + platform + '.h')
platform_defines += ['PLATFORM_' + platform.upper(), 'PLATFORM_NAME="%s"' % platform] +\
['SCREEN_COLOR_DEPTH_BITS=%d' % bitdepth]
if sources_ant_glob is not None:
platform_sources_ant_glob = sources_ant_glob
# handle platform specific files (ex. display_${PLATFORM}.c)
platform_sources_ant_glob = Template(platform_sources_ant_glob).substitute(
PLATFORM=platform, BITDEPTH=bitdepth)
sources_list = Utils.to_list(platform_sources_ant_glob)
for s in sources_list:
node = bld.srcnode.find_node(s)
if node is None:
raise Errors.WafError('Error: Source file "%s" not found for "%s"' % (s, test_name))
if node not in platform_product_sources:
platform_product_sources.append(node)
else:
raise Errors.WafError('Error: Duplicate source file "%s" found for "%s"' % (s, test_name))
program_sources = [test_source, clar_harness]
program_sources.extend(build_product_source_files(
bld, test_dir, includes, defines + platform_defines, cflags_force_include,
platform_product_sources))
bld.program(source=program_sources,
target=test_bin,
features='pebble_test',
includes=[test_dir.abspath()] + includes,
lib=test_libs,
defines=defines + platform_defines,
cflags=cflags_force_include,
use=use,
runtime_deps=runtime_deps)
def clar(bld, sources=None, sources_ant_glob=None, test_sources_ant_glob=None,
test_sources=None, test_libs=[], override_includes=[], add_includes=None, defines=None,
test_name=None, runtime_deps=None, platforms=None, use=None):
if test_sources_ant_glob is None and not test_sources:
raise Exception('clar() requires either test_sources_ant_glob or test_sources')
if test_sources is None:
test_sources = []
# Make a copy so if we modify it we don't accidentally modify the callers list
defines = list(defines or [])
defines.append('UNITTEST')
if platforms is None:
platforms = ['default']
if sources is None:
sources = []
if test_sources_ant_glob:
glob_sources = bld.path.ant_glob(test_sources_ant_glob)
test_sources.extend([s for s in glob_sources if not os.path.basename(s.abspath()).startswith('clar')])
Logs.debug("ut: Test sources %r", test_sources)
if len(test_sources) == 0:
Logs.pprint('RED', 'No tests found for glob: %s' % test_sources_ant_glob)
for test_source in test_sources:
if test_name is None:
test_name = test_source.name
test_name = test_name[:test_name.rfind('.')] # Scrape the extension
for platform in platforms:
add_clar_test(bld, test_name, test_source, sources_ant_glob, sources, test_libs,
override_includes, add_includes, defines, runtime_deps, platform, use)
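# Example use from a test wscript (a sketch; globs and platforms are
# illustrative, not taken from a real test directory):
# def build(bld):
#     clar(bld,
#          sources_ant_glob='src/fw/util/math.c',
#          test_sources_ant_glob='test_math.c',
#          platforms=['tintin', 'snowy'])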

120
waftools/protoc.py Normal file
View file

@ -0,0 +1,120 @@
# encoding: utf-8
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Philipp Bender, 2012
# Matt Clarkson, 2012
import re
from waflib.Task import Task
from waflib.TaskGen import extension, after_method
"""
A simple tool to integrate protocol buffers into your build system.
Adapted for nanopb from waftools/extras/protoc.py.
Example::
def configure(conf):
conf.load('compiler_c c protoc')
def build(bld):
bld(
features = 'c cprogram',
source = 'main.c file1.proto proto/file2.proto',
include = '. proto',
target = 'executable')
Notes when using this tool:
- protoc's command-line parsing is tricky: the generated files can end up
in subfolders that depend on the order of the include paths.
Keep task generators containing protoc sources simple.
"""
class protoc(Task):
# protoc expects the input proto file to be an absolute path.
run_str = '${PROTOC} ${PROTOC_FLAGS} ${PROTOC_ST:INCPATHS} ${SRC[0].abspath()}'
color = 'BLUE'
ext_out = ['.h', 'pb.c']
def scan(self):
"""
Scan .proto dependencies
"""
node = self.inputs[0]
nodes = []
names = []
seen = []
if not node: return (nodes, names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code = node.read().splitlines()
for line in code:
m = re.search(r'^import\s+"(.*)";.*(//)?.*', line)
if m:
dep = m.groups()[0]
for incpath in self.generator.includes_nodes:
found = incpath.find_resource(dep)
if found:
nodes.append(found)
parse_node(found)
else:
names.append(dep)
parse_node(node)
return (nodes, names)
@extension('.proto')
def process_protoc(self, node):
c_node = node.change_ext('.pb.c')
h_node = node.change_ext('.pb.h')
self.create_task('protoc', node, [c_node, h_node])
self.source.append(c_node)
if 'c' in self.features and not self.env.PROTOC_FLAGS:
# Each piece wrapped in [] explained.
# ~ [--nanopb_out=]-I%s:%s
# How we push arguments through protoc into nanopb_generator.py
# ~ --nanopb_out=[-I%s]:%s
# Pass in a path where nanopb_generator should search for .options files
# ~ --nanopb_out=-I%s[:]%s
# Separates the option args for nanopb_generator from the actual output folder
# ~ --nanopb_out=-I%s:[%s]
# Specifies the output folder
self.env.PROTOC_FLAGS = '--nanopb_out=-I%s:%s' % (node.parent.abspath(),
node.parent.get_bld().bldpath())
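# For example (paths purely illustrative), the flag expands to something like:
#   --nanopb_out=-I/repo/src/idl:build/src/idl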
use = getattr(self, 'use', '')
if not 'PROTOBUF' in use:
self.use = self.to_list(use) + ['PROTOBUF']
def configure(conf):
missing_nanopb = """
'nanopb' cannot be found on the system.
Follow the instructions on the wiki for installing it: https://pebbletechnology.atlassian.net/wiki/display/DEV/Getting+Started+with+Firmware
"""
conf.find_program('protoc-gen-nanopb', errmsg=missing_nanopb)
conf.find_program('protoc', var='PROTOC', errmsg=missing_nanopb)
conf.env.PROTOC_ST = '-I%s'

18
waftools/ragel.py Normal file
View file

@ -0,0 +1,18 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def ragel(task):
task.exec_command('ragel -o "%s" -C "%s"' % (task.outputs[0].abspath(),
task.inputs[0].abspath()))
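# Example wiring in a wscript (a sketch; source/target names are hypothetical):
# bld(rule=ragel, source='parser.rl', target='parser.c')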

View file

@ -0,0 +1,74 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import pipes
from waflib import ConfigSet, Options
from waflib.Build import BuildContext
from waflib.Configure import conf
def load_lockfile(env, basepath):
lockfile_path = os.path.join(basepath, Options.lockfile)
try:
env.load(lockfile_path)
except IOError:
raise ValueError('{} is not configured yet'.format(os.path.basename(os.getcwd())))
except Exception:
raise ValueError('Could not load {}'.format(lockfile_path))
@conf
def get_lockfile(ctx):
env = ConfigSet.ConfigSet()
try:
load_lockfile(env, ctx.out_dir)
except ValueError:
try:
load_lockfile(env, ctx.top_dir)
except ValueError as err:
ctx.fatal(str(err))
return
return env
class show_configure(BuildContext):
"""shows the last used configure command"""
cmd = 'show_configure'
def execute_build(ctx):
env = ctx.get_lockfile()
if not env:
return
argv = env.argv
# Configure time environment vars
for var in ['CFLAGS']:
if var in env.environ:
argv = ['{}={}'.format(var, pipes.quote(env.environ[var]))] + argv
# Persistent environment vars
for var in ['WAFLOCK']:
if var in env.environ:
argv = ['export {}={};'.format(var, pipes.quote(env.environ[var]))] + argv
# Print and force waf to complete without further output
print(' '.join(argv))
exit()

View file

@ -0,0 +1,184 @@
#!/usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sparse Length Encoding
A variant of run-length encoding which is tuned specifically to encode binary
data with long runs of zeroes interspersed with random (poorly-compressible)
data.
The format is fairly simple. The encoded data is a stream of octets (bytes)
beginning with a one-octet header. This header octet is the 'escape byte' that
indicates to the decoder that it and the following octets should be treated
specially. The encoder selects this escape byte to be an octet which occurs
least frequently (or not at all) in the decoded data.
The following octets of the encoded data are emitted literally until an escape
byte is encountered. The escape byte marks the start of an 'escape sequence'
comprised of the escape byte itself and one or two following bytes.
- The escape byte followed by 0x00 indicates the end of input.
- The escape byte followed by 0x01 means 'emit a literal escape byte'
- The escape byte followed by a byte "b" between 0x02 and 0x7f inclusive means
'emit b zeroes'. This two-byte sequence can encode a run of length 2-127.
- The escape byte followed by a byte "b" equal to or greater than 0x80
(i.e. with the MSB set) means 'take the next byte "c" and emit
((b & 0x7f) << 8 | c)+0x80 zeroes'. This three-byte sequence can encode a run
of length 128-32895.
The minimum overhead for this encoding scheme is three bytes: header and
end-of-input escape sequence.
"""
from collections import Counter
from itertools import groupby
_MAX_COUNT = 0x807F # max is ((0x7F << 8) | 0xFF) + 0x80
def encode(source):
# Analyze the source data to select the escape byte. To keep things simple, we don't allow 0 to
# be the escape character.
source = bytes(source)
frequency = Counter({chr(n): 0 for n in range(1, 256)})
frequency.update(source)
# most_common() doesn't define what happens if there's a tie in frequency. Let's always pick
# the lowest value of that frequency to make the encoding predictable.
occurrences = frequency.most_common()
escape = min(x[0] for x in occurrences if x[1] == occurrences[-1][1])
yield escape
for b, g in groupby(source):
if b == b'\0':
# this is a run of zeros
count = len(list(g))
while count >= 0x80:
# encode the number of zeros using two bytes
unit = min(count, _MAX_COUNT)
count -= unit
unit -= 0x80
yield escape
yield chr(((unit >> 8) & 0x7F) | 0x80)
yield chr(unit & 0xFF)
if count == 1:
# can't encode a length of 1 zero, so just emit it directly
yield b
elif 1 < count < 0x80:
# encode the number of zeros using one byte
yield escape
yield chr(count)
elif count < 0:
raise Exception('Encoding malfunctioned')
else:
# simply insert the characters (and escape the escape character)
for _ in g:
yield b
if b == escape:
yield b'\1'
yield escape
yield b'\0'
def decode(stream):
stream = iter(stream)
escape = next(stream)
while True:
char = next(stream)
if char == escape:
code = next(stream)
if code == b'\0':
return
elif code == b'\1':
yield escape
else:
if ord(code) & 0x80 == 0:
count = ord(code)
else:
count = (((ord(code) & 0x7f) << 8) | ord(next(stream))) + 0x80
assert(count <= _MAX_COUNT)
for _ in xrange(count):
yield b'\0'
else:
yield char
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
# run unit tests
import unittest
class TestSparseLengthEncoding(unittest.TestCase):
def test_empty(self):
raw_data = ''
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x01\x00')
self.assertEquals(decoded_data, raw_data)
def test_no_zeros(self):
raw_data = '\x02\xff\xef\x99'
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x02\xff\xef\x99\x01\x00')
self.assertEquals(decoded_data, raw_data)
def test_one_zero(self):
raw_data = '\x00'
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x00\x01\x00')
self.assertEquals(decoded_data, raw_data)
def test_small_number_of_zeros(self):
# under 0x80 zeros
raw_data = '\0' * 0x0040
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x01\x40\x01\x00')
self.assertEquals(decoded_data, raw_data)
def test_medium_number_of_zeros(self):
# between 0x80 and 0x807f zeros
raw_data = '\0' * 0x1800
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x01\x97\x80\x01\x00')
self.assertEquals(decoded_data, raw_data)
def test_remainder_one(self):
# leaves a remainder of 1 zero
raw_data = '\0' * (0x807f + 1)
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x01\xff\xff\x00\x01\x00')
self.assertEquals(decoded_data, raw_data)
def test_remainder_under_128(self):
# leaves a remainder of 100 zeros
raw_data = '\0' * (0x807f + 100)
encoded_data = ''.join(encode(raw_data))
decoded_data = ''.join(decode(encoded_data))
self.assertEquals(encoded_data, '\x01\x01\xff\xff\x01\x64\x01\x00')
self.assertEquals(decoded_data, raw_data)
unittest.main()
elif len(sys.argv) == 2:
# encode the specified file
data = open(sys.argv[1], 'rb').read()
encoded = ''.join(encode(data))
if ''.join(decode(encoded)) != data:
raise Exception('Invalid encoding')
sys.stdout.write(encoded)
else:
raise Exception('Invalid arguments')

1
waftools/stm32_crc.py Symbolic link
View file

@ -0,0 +1 @@
../tools/stm32_crc.py

413
waftools/xcode_pebble.py Normal file
View file

@ -0,0 +1,413 @@
#! /usr/bin/env python
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding: utf-8
# XCode 3/XCode 4 generator for Waf
# Nicolas Mercier 2011
"""
Usage:
def options(opt):
opt.load('xcode')
$ waf configure xcode
"""
# TODO: support iOS projects
from waflib import Context, TaskGen, Build, Utils
import os, sys, random, time
HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)'
MAP_EXT = {
'.h' : "sourcecode.c.h",
'.hh': "sourcecode.cpp.h",
'.inl': "sourcecode.cpp.h",
'.hpp': "sourcecode.cpp.h",
'.c': "sourcecode.c.c",
'.m': "sourcecode.c.objc",
'.mm': "sourcecode.cpp.objcpp",
'.cc': "sourcecode.cpp.cpp",
'.cpp': "sourcecode.cpp.cpp",
'.C': "sourcecode.cpp.cpp",
'.cxx': "sourcecode.cpp.cpp",
'.c++': "sourcecode.cpp.cpp",
'.l': "sourcecode.lex", # luthor
'.ll': "sourcecode.lex",
'.y': "sourcecode.yacc",
'.yy': "sourcecode.yacc",
'.plist': "text.plist.xml",
".nib": "wrapper.nib",
".xib": "text.xib",
}
SOURCE_EXT = frozenset(['.c', '.cpp', '.m', '.cxx', '.c++', '.C', '.cc', '.s', '.S'])
part1 = 0
part2 = 10000
part3 = 0
id = 562000999
def newid():
global id
id = id + 1
return "%04X%04X%04X%012d" % (0, 10000, 0, id)
class XCodeNode:
def __init__(self):
self._id = newid()
def tostring(self, value):
if isinstance(value, dict):
result = "{\n"
for k,v in value.items():
result = result + "\t\t\t%s = %s;\n" % (k, self.tostring(v))
result = result + "\t\t}"
return result
elif isinstance(value, str):
return "\"%s\"" % value
elif isinstance(value, list):
result = "(\n"
for i in value:
result = result + "\t\t\t%s,\n" % self.tostring(i)
result = result + "\t\t)"
return result
elif isinstance(value, XCodeNode):
return value._id
else:
return str(value)
def write_recursive(self, value, file):
if isinstance(value, dict):
for k,v in value.items():
self.write_recursive(v, file)
elif isinstance(value, list):
for i in value:
self.write_recursive(i, file)
elif isinstance(value, XCodeNode):
value.write(file)
def write(self, file):
for attribute,value in self.__dict__.items():
if attribute[0] != '_':
self.write_recursive(value, file)
w = file.write
w("\t%s = {\n" % self._id)
w("\t\tisa = %s;\n" % self.__class__.__name__)
for attribute,value in self.__dict__.items():
if attribute[0] != '_':
w("\t\t%s = %s;\n" % (attribute, self.tostring(value)))
w("\t};\n\n")
# Configurations
class XCBuildConfiguration(XCodeNode):
def __init__(self, name, settings = {}, env=None):
XCodeNode.__init__(self)
self.baseConfigurationReference = ""
self.buildSettings = settings
self.name = name
if env and env.ARCH:
settings['ARCHS'] = " ".join(env.ARCH)
settings['COMBINE_HIDPI_IMAGES'] = 'YES'
settings['ONLY_ACTIVE_ARCH'] = 'YES'
def config_octest(self):
self.buildSettings = {'PRODUCT_NAME':'$(TARGET_NAME)', 'WRAPPER_EXTENSION':'octest', 'COMBINE_HIDPI_IMAGES':'YES', 'ONLY_ACTIVE_ARCH':'YES'}
class XCConfigurationList(XCodeNode):
def __init__(self, settings):
XCodeNode.__init__(self)
self.buildConfigurations = settings
self.defaultConfigurationIsVisible = 0
self.defaultConfigurationName = settings and settings[0].name or ""
# Group/Files
class PBXFileReference(XCodeNode):
def __init__(self, name, path, filetype = '', sourcetree = "<group>"):
XCodeNode.__init__(self)
self.fileEncoding = 4
if not filetype:
_, ext = os.path.splitext(name)
filetype = MAP_EXT.get(ext, 'text')
self.lastKnownFileType = filetype
self.name = name
if os.path.isabs(path):
sourcetree = '<absolute>'
self.path = path
else:
sourcetree = '<group>'
self.path = os.path.basename(path)
class PBXGroup(XCodeNode):
def __init__(self, name, sourcetree = "<group>"):
XCodeNode.__init__(self)
self.children = []
self.name = name
self.path = name
self.sourceTree = sourcetree
def add(self, root, sources):
folders = {}
def folder(n):
if n == root:
return self
try:
return folders[n]
except KeyError:
f = PBXGroup(n.name)
p = folder(n.parent)
folders[n] = f
p.children.append(f)
return f
for s in sources:
f = folder(s.parent)
source = PBXFileReference(s.name, s.abspath())
f.children.append(source)
def add_all_files_from_folder_path(self, directory):
files = []
def should_skip(filepath):
name = os.path.basename(os.path.abspath(filepath))
return name.startswith('.') or os.path.splitext(name)[1] == '.xcodeproj' or name == 'build' # or has_hidden_attribute(filepath)
for name in os.listdir(directory):
path = os.path.join(directory, name)
if should_skip(path):
continue
if os.path.isfile(path):
fileref=PBXFileReference(os.path.basename(path), path)
self.children.append(fileref)
files.append(fileref)
elif os.path.isdir(path):
subgroup = PBXGroup(name)
files.extend(subgroup.add_all_files_from_folder_path(path))
self.children.append(subgroup)
return files
# Targets
class PBXLegacyTarget(XCodeNode):
def __init__(self,target=''):
XCodeNode.__init__(self)
self.buildConfigurationList = XCConfigurationList([XCBuildConfiguration('waf')])
self.buildArgumentsString="$(ACTION)"
self.buildPhases = []
self.buildToolPath="./waf-xcode.sh"
self.buildWorkingDirectory = ""
self.dependencies = []
self.name = target
self.productName = target
self.passBuildSettingsInEnvironment = 0
class PBXShellScriptBuildPhase(XCodeNode):
def __init__(self, script):
XCodeNode.__init__(self)
self.buildActionMask = 2147483647
self.files = []
self.inputPaths = []
self.outputPaths = []
self.runOnlyForDeploymentPostprocessing = 1
self.shellPath = "/bin/sh"
self.shellScript = script
class PBXNativeTarget(XCodeNode):
def __init__(self, action=None, target=None, node=None, env=None, script=None, productType="com.apple.product-type.application"):
XCodeNode.__init__(self)
opts = {'PRODUCT_NAME':target, 'HEADER_SEARCH_PATHS': "$(SRCROOT)/../src/**"}
if node:
opts['CONFIGURATION_BUILD_DIR'] = node.parent.abspath()
conf = XCBuildConfiguration('waf', opts, env)
self.buildConfigurationList = XCConfigurationList([conf])
self.buildPhases = []
if script != None:
self.buildPhases.append(PBXShellScriptBuildPhase(script))
self.buildRules = []
self.dependencies = []
self.name = target
self.productName = target
self.productType = productType
if node: product_dir = node.abspath()
else: product_dir = ""
self.productReference = PBXFileReference(target, product_dir, 'wrapper.application', 'BUILT_PRODUCTS_DIR')
def config_octest_target(self):
conf = XCBuildConfiguration('waf', {}, None)
conf.config_octest()
self.buildConfigurationList = XCConfigurationList([conf])
self.productType = "com.apple.product-type.bundle"
class PBXSourcesBuildPhase(XCodeNode):
def __init__(self):
XCodeNode.__init__(self)
self.buildActionMask = 2147483647
self.runOnlyForDeploymentPostprocessing = 0
self.files = []
def add_files(self, files):
for f in files:
_, ext = os.path.splitext(f.name)
if ext in SOURCE_EXT:
bf = PBXBuildFile(f)
self.files.append(bf)
class PBXBuildFile(XCodeNode):
def __init__(self, fileRef):
XCodeNode.__init__(self)
self.fileRef = fileRef
# Root project object
class PBXProject(XCodeNode):
def __init__(self, name, version):
XCodeNode.__init__(self)
self.buildConfigurationList = XCConfigurationList([XCBuildConfiguration('waf', {})])
self.compatibilityVersion = version[0]
self.hasScannedForEncodings = 1
self.mainGroup = PBXGroup(name)
self.projectRoot = ""
self.projectDirPath = ""
self.targets = []
self._objectVersion = version[1]
self._output = PBXGroup('out')
self.mainGroup.children.append(self._output)
def write(self, file):
w = file.write
w("// !$*UTF8*$!\n")
w("{\n")
w("\tarchiveVersion = 1;\n")
w("\tclasses = {\n")
w("\t};\n")
w("\tobjectVersion = %d;\n" % self._objectVersion)
w("\tobjects = {\n\n")
XCodeNode.write(self, file)
w("\t};\n")
w("\trootObject = %s;\n" % self._id)
w("}\n")
class xcode_pebble(Build.BuildContext):
"""creates an xcode project file"""
cmd = 'xcode'
fun = 'build'
def collect_source(self, tg):
source_files = tg.to_nodes(getattr(tg, 'source', []))
plist_files = tg.to_nodes(getattr(tg, 'mac_plist', []))
resource_files = [tg.path.find_node(i) for i in Utils.to_list(getattr(tg, 'mac_resources', []))]
include_dirs = Utils.to_list(getattr(tg, 'includes', [])) + Utils.to_list(getattr(tg, 'export_dirs', []))
include_files = []
for x in include_dirs:
if not isinstance(x, str):
include_files.append(x)
continue
d = tg.path.find_node(x)
if d:
lst = [y for y in d.ant_glob(HEADERS_GLOB, flat=False)]
include_files.extend(lst)
# remove duplicates
source = list(set(source_files + plist_files + resource_files + include_files))
source.sort(key=lambda x: x.abspath())
return source
def execute(self):
"""
Entry point
"""
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
root = os.path.basename(self.srcnode.abspath())
appname = getattr(Context.g_module, Context.APPNAME, root)
p = PBXProject(appname, ('Xcode 3.2', 46))
# Xcode Target that invokes waf-xcode.sh:
target = PBXLegacyTarget('waf')
p.targets.append(target)
# Add references to all files:
p.mainGroup.path = "../"
files = p.mainGroup.add_all_files_from_folder_path(self.srcnode.abspath())
# FIXME: How to get SDK path?
sdk_path = os.path.join(os.path.dirname(Context.__file__), '..', '..')
if sdk_path and os.path.exists(sdk_path):
sdk_include_path = os.path.abspath(os.path.join(sdk_path, 'include'))
if os.path.exists(sdk_include_path):
sdk_headers = p.mainGroup.add_all_files_from_folder_path(sdk_include_path)
files.extend(sdk_headers)
# Create dummy native app that is needed to trigger Xcode's code completion + indexing:
index_dummy_target = PBXNativeTarget(None, "index_dummy", productType="com.apple.product-type.tool")
index_dummy_sources_phase = PBXSourcesBuildPhase()
index_dummy_sources_phase.add_files(files)
index_dummy_target.buildPhases.append(index_dummy_sources_phase)
p.targets.append(index_dummy_target)
# Create fake .octest bundle to invoke ./waf test:
clar_tests_target = PBXNativeTarget(None, "clar_tests", script="export ACTION=test\n./waf-xcode.sh")
clar_tests_target.config_octest_target()
p.targets.append(clar_tests_target)
# Xcode Target that invokes waf test
target = PBXLegacyTarget('waf test')
target.buildArgumentsString = "test"
p.targets.append(target)
# Write generated project to disk:
node = self.srcnode.make_node('xcode/%s.xcodeproj' % appname)
node.mkdir()
node = node.make_node('project.pbxproj')
p.write(open(node.abspath(), 'w'))
# Generate waf-xcode.sh shim script
xcscript_node=self.srcnode.make_node('xcode/waf-xcode.sh')
xcscript_path=xcscript_node.abspath()
f = open(xcscript_path,'w')
f.write("#!/bin/bash\n\
# Expecting PebbleSDK + arm toolchain + openocd binaries to be in $PATH after sourcing .bash_profile:\n\
export PATH=`python ../tools/strip_xcode_paths.py`\n\
source ~/.bash_profile\n\
cd ..\n\
ACTION=$@\n\
if [ -z $ACTION ]; then\n\
ACTION=build\n\
fi\n\
# Use pypy if available\n\
if ! which pypy &> /dev/null; then\n\
# Check if waf is on the path:\n\
if ! type \"waf\" &> /dev/null; then\n\
./waf $ACTION\n\
else\n\
waf $ACTION\n\
fi\n\
else\n\
echo \"Using pypy\"\n\
pypy waf $ACTION\n\
fi\n\
")
os.chmod(xcscript_path, 0755)
f.close()