Added submodule contents into tree
27  externals/mbedtls/tests/scripts/all-in-docker.sh  vendored  Executable file
@@ -0,0 +1,27 @@
#!/bin/bash -eu

# all-in-docker.sh
#
# Purpose
# -------
# This runs all.sh (except for armcc) in a Docker container.
#
# WARNING: the Dockerfile used by this script is no longer maintained! See
# https://github.com/Mbed-TLS/mbedtls-test/blob/master/README.md#quick-start
# for the set of Docker images we use on the CI.
#
# Notes for users
# ---------------
# See docker_env.sh for prerequisites and other information.
#
# See also all.sh for notes about invocation of that script.

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

source tests/scripts/docker_env.sh

# Run tests that are possible with openly available compilers
run_in_docker tests/scripts/all.sh \
    --no-armcc \
    "$@"
6382  externals/mbedtls/tests/scripts/all.sh  vendored  Executable file
(File diff suppressed because it is too large.)
720  externals/mbedtls/tests/scripts/analyze_outcomes.py  vendored  Executable file
@@ -0,0 +1,720 @@
#!/usr/bin/env python3

"""Analyze the test outcomes from a full CI run.

This script can also run on outcomes from a partial run, but the results are
less likely to be useful.
"""

import argparse
import sys
import traceback
import re
import subprocess
import os
import typing

import check_test_cases


# `ComponentOutcomes` is a named tuple which is defined as:
# ComponentOutcomes(
#     successes = {
#         "<suite_case>",
#         ...
#     },
#     failures = {
#         "<suite_case>",
#         ...
#     }
# )
# suite_case = "<suite>;<case>"
ComponentOutcomes = typing.NamedTuple('ComponentOutcomes',
                                      [('successes', typing.Set[str]),
                                       ('failures', typing.Set[str])])

# `Outcomes` is a representation of the outcomes file,
# which is defined as:
# Outcomes = {
#     "<component>": ComponentOutcomes,
#     ...
# }
Outcomes = typing.Dict[str, ComponentOutcomes]
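
# A minimal, hypothetical instance of the types above (component and case
# names are invented for illustration; not part of the original script):
_EXAMPLE_OUTCOMES: Outcomes = {
    'component_test_default': ComponentOutcomes(
        successes={'test_suite_aes;AES-128-ECB Encrypt NIST KAT #1'},
        failures={'test_suite_rsa;RSA verify - good signature'},
    ),
}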

class Results:
    """Process analysis results."""

    def __init__(self):
        self.error_count = 0
        self.warning_count = 0

    def new_section(self, fmt, *args, **kwargs):
        self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs)

    def info(self, fmt, *args, **kwargs):
        self._print_line('Info: ' + fmt, *args, **kwargs)

    def error(self, fmt, *args, **kwargs):
        self.error_count += 1
        self._print_line('Error: ' + fmt, *args, **kwargs)

    def warning(self, fmt, *args, **kwargs):
        self.warning_count += 1
        self._print_line('Warning: ' + fmt, *args, **kwargs)

    @staticmethod
    def _print_line(fmt, *args, **kwargs):
        sys.stderr.write((fmt + '\n').format(*args, **kwargs))

def execute_reference_driver_tests(results: Results, ref_component: str, driver_component: str,
                                   outcome_file: str) -> None:
    """Run the tests specified in ref_component and driver_component. Results
    are stored in the outcome_file and will be used for the following
    coverage analysis."""
    results.new_section("Test {} and {}", ref_component, driver_component)

    shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                    " " + ref_component + " " + driver_component
    results.info("Running: {}", shell_command)
    ret_val = subprocess.run(shell_command.split(), check=False).returncode

    if ret_val != 0:
        results.error("failed to run reference/driver components")

def analyze_coverage(results: Results, outcomes: Outcomes,
                     allow_list: typing.List[str], full_coverage: bool) -> None:
    """Check that all available test cases are executed at least once."""
    available = check_test_cases.collect_available_test_cases()
    for suite_case in available:
        hit = any(suite_case in comp_outcomes.successes or
                  suite_case in comp_outcomes.failures
                  for comp_outcomes in outcomes.values())

        if not hit and suite_case not in allow_list:
            if full_coverage:
                results.error('Test case not executed: {}', suite_case)
            else:
                results.warning('Test case not executed: {}', suite_case)
        elif hit and suite_case in allow_list:
            # The test case should be removed from the allow list.
            if full_coverage:
                results.error('Allow-listed test case was executed: {}', suite_case)
            else:
                results.warning('Allow-listed test case was executed: {}', suite_case)

def name_matches_pattern(name: str, str_or_re) -> bool:
    """Check whether name matches a pattern that may be a string or a regex.
    - If the pattern is a string, name must be equal to it to match.
    - If the pattern is a regex, name must match it fully.
    """
    # The CI's Python is too old for re.Pattern
    #if isinstance(str_or_re, re.Pattern):
    if not isinstance(str_or_re, str):
        return str_or_re.fullmatch(name) is not None
    else:
        return str_or_re == name
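
# Illustrative checks (hypothetical test case names, invented for this
# example): a plain string must match exactly, while a compiled regex must
# match the full name.
assert name_matches_pattern('MD list', 'MD list')
assert name_matches_pattern('CMAC AES-128', re.compile('CMAC.*'))
assert not name_matches_pattern('CMAC AES-128', 'CMAC')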

def analyze_driver_vs_reference(results: Results, outcomes: Outcomes,
                                component_ref: str, component_driver: str,
                                ignored_suites: typing.List[str], ignored_tests=None) -> None:
    """Check that all tests passing in the reference component are also
    passing in the corresponding driver component.
    Skip:
    - full test suites provided in the ignored_suites list
    - specific tests inside a test suite, for which the corresponding
      output string is provided
    """
    ref_outcomes = outcomes.get("component_" + component_ref)
    driver_outcomes = outcomes.get("component_" + component_driver)

    if ref_outcomes is None or driver_outcomes is None:
        results.error("required components are missing: bad outcome file?")
        return

    if not ref_outcomes.successes:
        results.error("no passing test in reference component: bad outcome file?")
        return

    if ignored_tests is None:
        ignored_tests = {}

    for suite_case in ref_outcomes.successes:
        # suite_case is like "test_suite_foo.bar;Description of test case"
        (full_test_suite, test_string) = suite_case.split(';')
        test_suite = full_test_suite.split('.')[0] # retrieve main part of test suite name

        # Immediately skip fully-ignored test suites
        if test_suite in ignored_suites or full_test_suite in ignored_suites:
            continue

        # For ignored test cases inside test suites, just remember and:
        # don't issue an error if they're skipped with drivers,
        # but issue an error if they're not (it means we have a bad entry).
        ignored = False
        if full_test_suite in ignored_tests:
            for str_or_re in ignored_tests[full_test_suite]:
                if name_matches_pattern(test_string, str_or_re):
                    ignored = True

        if not ignored and suite_case not in driver_outcomes.successes:
            results.error("PASS -> SKIP/FAIL: {}", suite_case)
        if ignored and suite_case in driver_outcomes.successes:
            results.error("uselessly ignored: {}", suite_case)

def analyze_outcomes(results: Results, outcomes: Outcomes, args) -> None:
    """Run all analyses on the given outcome collection."""
    analyze_coverage(results, outcomes, args['allow_list'],
                     args['full_coverage'])

def read_outcome_file(outcome_file: str) -> Outcomes:
    """Parse an outcome file and return an outcome collection."""
    outcomes = {}
    with open(outcome_file, 'r', encoding='utf-8') as input_file:
        for line in input_file:
            (_platform, component, suite, case, result, _cause) = line.split(';')
            # Note that `component` is not unique. If a test case passes on Linux
            # and fails on FreeBSD, it'll end up in both the successes set and
            # the failures set.
            suite_case = ';'.join([suite, case])
            if component not in outcomes:
                outcomes[component] = ComponentOutcomes(set(), set())
            if result == 'PASS':
                outcomes[component].successes.add(suite_case)
            elif result == 'FAIL':
                outcomes[component].failures.add(suite_case)

    return outcomes
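
# For reference, a hypothetical outcome line (not taken from a real run):
#     Linux-x86_64;component_test_default;test_suite_aes;AES-128-ECB Encrypt;PASS;
# yields suite_case 'test_suite_aes;AES-128-ECB Encrypt' in
# outcomes['component_test_default'].successes.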

def do_analyze_coverage(results: Results, outcomes: Outcomes, args) -> None:
    """Perform coverage analysis."""
    results.new_section("Analyze coverage")
    analyze_outcomes(results, outcomes, args)

def do_analyze_driver_vs_reference(results: Results, outcomes: Outcomes, args) -> None:
    """Perform driver vs reference analysis."""
    results.new_section("Analyze driver {} vs reference {}",
                        args['component_driver'], args['component_ref'])

    ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]

    analyze_driver_vs_reference(results, outcomes,
                                args['component_ref'], args['component_driver'],
                                ignored_suites, args['ignored_tests'])

# List of tasks, each with a function that can handle the task and additional
# arguments if required.
KNOWN_TASKS = {
    'analyze_coverage': {
        'test_function': do_analyze_coverage,
        'args': {
            'allow_list': [
                # Algorithm not supported yet
                'test_suite_psa_crypto_metadata;Asymmetric signature: pure EdDSA',
                # Algorithm not supported yet
                'test_suite_psa_crypto_metadata;Cipher: XTS',
            ],
            'full_coverage': False,
        }
    },
    # There are two options to use analyze_driver_vs_reference_xxx locally:
    # 1. Run the tests and then the analysis:
    #   - tests/scripts/all.sh --outcome-file "$PWD/out.csv" <component_ref> <component_driver>
    #   - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx
    # 2. Let this script run both automatically:
    #   - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx
    'analyze_driver_vs_reference_hash': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_hash_use_psa',
            'component_driver': 'test_psa_crypto_config_accel_hash_use_psa',
            'ignored_suites': [
                'shax', 'mdx', # the software implementations that are being excluded
                'md.psa',  # purposefully depends on whether drivers are present
                'psa_crypto_low_hash.generated',  # testing the builtins
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_hmac': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_hmac',
            'component_driver': 'test_psa_crypto_config_accel_hmac',
            'ignored_suites': [
                # These suites require legacy hash support, which is disabled
                # in the accelerated component.
                'shax', 'mdx',
                # This suite tests the builtins directly, but these are missing
                # in the accelerated case.
                'psa_crypto_low_hash.generated',
            ],
            'ignored_tests': {
                'test_suite_md': [
                    # Builtin HMAC is not supported in the accelerated component.
                    re.compile('.*HMAC.*'),
                    # The following tests use functions which are not available
                    # when MD_C is disabled, as happens in the accelerated
                    # test component.
                    re.compile('generic .* Hash file .*'),
                    'MD list',
                ],
                'test_suite_md.psa': [
                    # "legacy only" tests require hash algorithms to be NOT
                    # accelerated, which is of course false for the accelerated
                    # test component.
                    re.compile('PSA dispatch .* legacy only'),
                ],
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_cipher_aead_cmac': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_cipher_aead_cmac',
            'component_driver': 'test_psa_crypto_config_accel_cipher_aead_cmac',
            # Modules replaced by drivers.
            'ignored_suites': [
                # low-level (block/stream) cipher modules
                'aes', 'aria', 'camellia', 'des', 'chacha20',
                # AEAD modes and CMAC
                'ccm', 'chachapoly', 'cmac', 'gcm',
                # The Cipher abstraction layer
                'cipher',
            ],
            'ignored_tests': {
                # PEM decryption is not supported so far.
                # The rest of PEM (write, unencrypted read) works though.
                'test_suite_pem': [
                    re.compile(r'PEM read .*(AES|DES|\bencrypt).*'),
                ],
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # The following tests depend on AES_C/DES_C but are not really
                # about them; they just need to know that some error code exists.
                'test_suite_error': [
                    'Low and high error',
                    'Single low error'
                ],
                # Similar to test_suite_error above.
                'test_suite_version': [
                    'Check for MBEDTLS_AES_C when already present',
                ],
                # The en/decryption part of PKCS#12 is not supported so far.
                # The rest of PKCS#12 (key derivation) works though.
                'test_suite_pkcs12': [
                    re.compile(r'PBE Encrypt, .*'),
                    re.compile(r'PBE Decrypt, .*'),
                ],
                # The en/decryption part of PKCS#5 is not supported so far.
                # The rest of PKCS#5 (PBKDF2) works though.
                'test_suite_pkcs5': [
                    re.compile(r'PBES2 Encrypt, .*'),
                    re.compile(r'PBES2 Decrypt .*'),
                ],
                # Encrypted keys are not supported so far.
                # pylint: disable=line-too-long
                'test_suite_pkparse': [
                    'Key ASN1 (Encrypted key PKCS12, trailing garbage data)',
                    'Key ASN1 (Encrypted key PKCS5, trailing garbage data)',
                    re.compile(r'Parse (RSA|EC) Key .*\(.* ([Ee]ncrypted|password).*\)'),
                ],
            }
        }
    },
    'analyze_driver_vs_reference_ecp_light_only': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_ecc_ecp_light_only',
            'component_driver': 'test_psa_crypto_config_accel_ecc_ecp_light_only',
            'ignored_suites': [
                # Modules replaced by drivers
                'ecdsa', 'ecdh', 'ecjpake',
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # This test wants a legacy function that takes f_rng, p_rng
                # arguments, and uses legacy ECDSA for that. The test is
                # really about the wrapper around the PSA RNG, not ECDSA.
                'test_suite_random': [
                    'PSA classic wrapper: ECDSA signature (SECP256R1)',
                ],
                # In the accelerated component ECP_C is not set (only
                # ECP_LIGHT is), so we must ignore disparities in the tests
                # for which ECP_C is required.
                'test_suite_ecp': [
                    re.compile(r'ECP check public-private .*'),
                    re.compile(r'ECP calculate public: .*'),
                    re.compile(r'ECP gen keypair .*'),
                    re.compile(r'ECP point muladd .*'),
                    re.compile(r'ECP point multiplication .*'),
                    re.compile(r'ECP test vectors .*'),
                ],
                'test_suite_ssl': [
                    # This deprecated function is only present when ECP_C is on.
                    'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_no_ecp_at_all': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_ecc_no_ecp_at_all',
            'component_driver': 'test_psa_crypto_config_accel_ecc_no_ecp_at_all',
            'ignored_suites': [
                # Modules replaced by drivers
                'ecp', 'ecdsa', 'ecdh', 'ecjpake',
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # See ecp_light_only
                'test_suite_random': [
                    'PSA classic wrapper: ECDSA signature (SECP256R1)',
                ],
                'test_suite_pkparse': [
                    # When PK_PARSE_C and ECP_C are defined, PK_PARSE_EC_COMPRESSED
                    # is automatically enabled in build_info.h (for backward
                    # compatibility) even if it is disabled in
                    # config_psa_crypto_no_ecp_at_all(). As a consequence,
                    # compressed points are supported in the reference component
                    # but not in the accelerated one, so they should be skipped
                    # while checking the driver's coverage.
                    re.compile(r'Parse EC Key .*compressed\)'),
                    re.compile(r'Parse Public EC Key .*compressed\)'),
                ],
                # See ecp_light_only
                'test_suite_ssl': [
                    'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_ecc_no_bignum': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_ecc_no_bignum',
            'component_driver': 'test_psa_crypto_config_accel_ecc_no_bignum',
            'ignored_suites': [
                # Modules replaced by drivers
                'ecp', 'ecdsa', 'ecdh', 'ecjpake',
                'bignum_core', 'bignum_random', 'bignum_mod', 'bignum_mod_raw',
                'bignum.generated', 'bignum.misc',
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # See ecp_light_only
                'test_suite_random': [
                    'PSA classic wrapper: ECDSA signature (SECP256R1)',
                ],
                # See no_ecp_at_all
                'test_suite_pkparse': [
                    re.compile(r'Parse EC Key .*compressed\)'),
                    re.compile(r'Parse Public EC Key .*compressed\)'),
                ],
                'test_suite_asn1parse': [
                    'INTEGER too large for mpi',
                ],
                'test_suite_asn1write': [
                    re.compile(r'ASN.1 Write mpi.*'),
                ],
                'test_suite_debug': [
                    re.compile(r'Debug print mbedtls_mpi.*'),
                ],
                # See ecp_light_only
                'test_suite_ssl': [
                    'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_ecc_ffdh_no_bignum': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_ecc_ffdh_no_bignum',
            'component_driver': 'test_psa_crypto_config_accel_ecc_ffdh_no_bignum',
            'ignored_suites': [
                # Modules replaced by drivers
                'ecp', 'ecdsa', 'ecdh', 'ecjpake', 'dhm',
                'bignum_core', 'bignum_random', 'bignum_mod', 'bignum_mod_raw',
                'bignum.generated', 'bignum.misc',
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # See ecp_light_only
                'test_suite_random': [
                    'PSA classic wrapper: ECDSA signature (SECP256R1)',
                ],
                # See no_ecp_at_all
                'test_suite_pkparse': [
                    re.compile(r'Parse EC Key .*compressed\)'),
                    re.compile(r'Parse Public EC Key .*compressed\)'),
                ],
                'test_suite_asn1parse': [
                    'INTEGER too large for mpi',
                ],
                'test_suite_asn1write': [
                    re.compile(r'ASN.1 Write mpi.*'),
                ],
                'test_suite_debug': [
                    re.compile(r'Debug print mbedtls_mpi.*'),
                ],
                # See ecp_light_only
                'test_suite_ssl': [
                    'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_ffdh_alg': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_ffdh',
            'component_driver': 'test_psa_crypto_config_accel_ffdh',
            'ignored_suites': ['dhm'],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_tfm_config': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_tfm_config',
            'component_driver': 'test_tfm_config_p256m_driver_accel_ec',
            'ignored_suites': [
                # Modules replaced by drivers
                'asn1parse', 'asn1write',
                'ecp', 'ecdsa', 'ecdh', 'ecjpake',
                'bignum_core', 'bignum_random', 'bignum_mod', 'bignum_mod_raw',
                'bignum.generated', 'bignum.misc',
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # See ecp_light_only
                'test_suite_random': [
                    'PSA classic wrapper: ECDSA signature (SECP256R1)',
                ],
            }
        }
    },
    'analyze_driver_vs_reference_rsa': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_psa_crypto_config_reference_rsa_crypto',
            'component_driver': 'test_psa_crypto_config_accel_rsa_crypto',
            'ignored_suites': [
                # Modules replaced by drivers.
                'rsa', 'pkcs1_v15', 'pkcs1_v21',
                # We temporarily don't care about PK stuff.
                'pk', 'pkwrite', 'pkparse'
            ],
            'ignored_tests': {
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
                # The following tests depend on RSA_C but are not really about
                # it; they just need to know that some error code exists.
                'test_suite_error': [
                    'Low and high error',
                    'Single high error'
                ],
                # Constant-time operations are only used for PKCS1_V15.
                'test_suite_constant_time': [
                    re.compile(r'mbedtls_ct_zeroize_if .*'),
                    re.compile(r'mbedtls_ct_memmove_left .*')
                ],
                'test_suite_psa_crypto': [
                    # We don't support generate_key_ext entry points
                    # in drivers yet.
                    re.compile(r'PSA generate key ext: RSA, e=.*'),
                ],
            }
        }
    },
    'analyze_block_cipher_dispatch': {
        'test_function': do_analyze_driver_vs_reference,
        'args': {
            'component_ref': 'test_full_block_cipher_legacy_dispatch',
            'component_driver': 'test_full_block_cipher_psa_dispatch',
            'ignored_suites': [
                # Skipped in the accelerated component
                'aes', 'aria', 'camellia',
                # These require AES_C, ARIA_C or CAMELLIA_C to be enabled in
                # order for the cipher module (actually cipher_wrapper) to work
                # properly. However these symbols are disabled in the accelerated
                # component so we ignore them.
                'cipher.ccm', 'cipher.gcm', 'cipher.aes', 'cipher.aria',
                'cipher.camellia',
            ],
            'ignored_tests': {
                'test_suite_cmac': [
                    # The following tests require AES_C/ARIA_C/CAMELLIA_C to be
                    # enabled, but these are not available in the accelerated
                    # component.
                    'CMAC null arguments',
                    re.compile('CMAC.* (AES|ARIA|Camellia).*'),
                ],
                'test_suite_cipher.padding': [
                    # The following tests require AES_C/CAMELLIA_C to be
                    # enabled, but these are not available in the accelerated
                    # component.
                    re.compile('Set( non-existent)? padding with (AES|CAMELLIA).*'),
                ],
                'test_suite_pkcs5': [
                    # The AES part of PKCS#5 PBES2 is not yet supported.
                    # The rest of PKCS#5 (PBKDF2) works, though.
                    re.compile(r'PBES2 .* AES-.*')
                ],
                'test_suite_pkparse': [
                    # PEM (called by pkparse) requires AES_C in order to decrypt
                    # the key, but this is not available in the accelerated
                    # component.
                    re.compile('Parse RSA Key.*(password|AES-).*'),
                ],
                'test_suite_pem': [
                    # The following tests require AES_C, but it is disabled in
                    # the accelerated component.
                    re.compile('PEM read .*AES.*'),
                    'PEM read (unknown encryption algorithm)',
                ],
                'test_suite_error': [
                    # The following tests depend on AES_C but are not really
                    # about it; they just need to know that some error code
                    # exists.
                    'Single low error',
                    'Low and high error',
                ],
                'test_suite_version': [
                    # Similar to test_suite_error above.
                    'Check for MBEDTLS_AES_C when already present',
                ],
                'test_suite_platform': [
                    # Incompatible with sanitizers (e.g. ASan). If the driver
                    # component uses a sanitizer but the reference component
                    # doesn't, we have a PASS vs SKIP mismatch.
                    'Check mbedtls_calloc overallocation',
                ],
            }
        }
    }
}

def main():
    main_results = Results()

    try:
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
                            help='Outcome file to analyze')
        parser.add_argument('specified_tasks', default='all', nargs='?',
                            help='Analysis to be done. By default, run all tasks. '
                                 'With one or more TASK, run only those. '
                                 'TASK can be the name of a single task or a '
                                 'comma/space-separated list of tasks.')
        parser.add_argument('--list', action='store_true',
                            help='List all available tasks and exit.')
        parser.add_argument('--require-full-coverage', action='store_true',
                            dest='full_coverage', help="Require all available "
                            "test cases to be executed and issue an error "
                            "otherwise. This flag is ignored if 'task' is "
                            "neither 'all' nor 'analyze_coverage'.")
        options = parser.parse_args()

        if options.list:
            for task in KNOWN_TASKS:
                print(task)
            sys.exit(0)

        if options.specified_tasks == 'all':
            tasks_list = KNOWN_TASKS.keys()
        else:
            tasks_list = re.split(r'[, ]+', options.specified_tasks)
            for task in tasks_list:
                if task not in KNOWN_TASKS:
                    sys.stderr.write('invalid task: {}\n'.format(task))
                    sys.exit(2)

        KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage

        # If the outcome file exists, parse it once and share the result
        # among tasks to improve performance.
        # Otherwise, it will be generated by execute_reference_driver_tests.
        if not os.path.exists(options.outcomes):
            if len(tasks_list) > 1:
                sys.stderr.write("multiple tasks found, please provide a valid outcomes file.\n")
                sys.exit(2)

            task_name = tasks_list[0]
            task = KNOWN_TASKS[task_name]
            if task['test_function'] != do_analyze_driver_vs_reference: # pylint: disable=comparison-with-callable
                sys.stderr.write("please provide a valid outcomes file for {}.\n".format(task_name))
                sys.exit(2)

            execute_reference_driver_tests(main_results,
                                           task['args']['component_ref'],
                                           task['args']['component_driver'],
                                           options.outcomes)

        outcomes = read_outcome_file(options.outcomes)

        for task in tasks_list:
            test_function = KNOWN_TASKS[task]['test_function']
            test_args = KNOWN_TASKS[task]['args']
            test_function(main_results, outcomes, test_args)

        main_results.info("Overall results: {} warnings and {} errors",
                          main_results.warning_count, main_results.error_count)

        sys.exit(0 if (main_results.error_count == 0) else 1)

    except Exception: # pylint: disable=broad-except
        # Print the backtrace and exit explicitly with our chosen status.
        traceback.print_exc()
        sys.exit(120)

if __name__ == '__main__':
    main()
469  externals/mbedtls/tests/scripts/audit-validity-dates.py  vendored  Executable file
@@ -0,0 +1,469 @@
#!/usr/bin/env python3
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""Audit the validity dates of X.509 crt/crl/csr files.

This script audits the validity dates of the crt/crl/csr files used for
testing. It prints information about the X.509 objects, excluding the objects
that are valid throughout the desired validity period. The data are collected
from tests/data_files/ and tests/suites/*.data files by default.
"""

import os
import re
import typing
import argparse
import datetime
import glob
import logging
import hashlib
from enum import Enum

# The script requires cryptography >= 35.0.0, which is only available
# for Python >= 3.6.
import cryptography
from cryptography import x509

from generate_test_code import FileWrapper

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import build_tree
from mbedtls_dev import logging_util

def check_cryptography_version():
    match = re.match(r'^[0-9]+', cryptography.__version__)
    if match is None or int(match.group(0)) < 35:
        raise Exception("audit-validity-dates requires cryptography >= 35.0.0 "
                        + "({} is too old)".format(cryptography.__version__))
class DataType(Enum):
    CRT = 1 # Certificate
    CRL = 2 # Certificate Revocation List
    CSR = 3 # Certificate Signing Request


class DataFormat(Enum):
    PEM = 1 # Privacy-Enhanced Mail
    DER = 2 # Distinguished Encoding Rules


class AuditData:
    """Store the data location, type and validity period of X.509 objects."""
    #pylint: disable=too-few-public-methods
    def __init__(self, data_type: DataType, x509_obj):
        self.data_type = data_type
        # the locations where the X.509 object was found
        self.locations = [] # type: typing.List[str]
        self.fill_validity_duration(x509_obj)
        self._obj = x509_obj
        encoding = cryptography.hazmat.primitives.serialization.Encoding.DER
        self._identifier = hashlib.sha1(self._obj.public_bytes(encoding)).hexdigest()

    @property
    def identifier(self):
        """
        Identifier of the underlying X.509 object, which is consistent across
        different runs.
        """
        return self._identifier

    def fill_validity_duration(self, x509_obj):
        """Read the validity period from an X.509 object."""
        # A Certificate expires after "not_valid_after"
        # A Certificate is invalid before "not_valid_before"
        if self.data_type == DataType.CRT:
            self.not_valid_after = x509_obj.not_valid_after
            self.not_valid_before = x509_obj.not_valid_before
        # A CertificateRevocationList expires after "next_update"
        # A CertificateRevocationList is invalid before "last_update"
        elif self.data_type == DataType.CRL:
            self.not_valid_after = x509_obj.next_update
            self.not_valid_before = x509_obj.last_update
        # A CertificateSigningRequest is always valid.
        elif self.data_type == DataType.CSR:
            self.not_valid_after = datetime.datetime.max
            self.not_valid_before = datetime.datetime.min
        else:
            raise ValueError("Unsupported file_type: {}".format(self.data_type))


class X509Parser:
    """A parser class to parse crt/crl/csr files or data in PEM/DER format."""
    PEM_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}(?P<data>.*?)-{5}END (?P=type)-{5}'
    PEM_TAG_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}\n'
    PEM_TAGS = {
        DataType.CRT: 'CERTIFICATE',
        DataType.CRL: 'X509 CRL',
        DataType.CSR: 'CERTIFICATE REQUEST'
    }
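    # For illustration (hypothetical data): searching PEM_REGEX with re.S in
    # b'-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----' matches
    # with group('type') == b'CERTIFICATE' and group('data') holding the body
    # between the BEGIN and END markers.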

    def __init__(self,
                 backends: typing.Dict[DataType,
                                       typing.Dict[DataFormat,
                                                   typing.Callable[[bytes], object]]]
                 ) -> None:
        self.backends = backends
        self.__generate_parsers()

    def __generate_parser(self, data_type: DataType):
        """Parser generator for a specific DataType"""
        tag = self.PEM_TAGS[data_type]
        pem_loader = self.backends[data_type][DataFormat.PEM]
        der_loader = self.backends[data_type][DataFormat.DER]
        def wrapper(data: bytes):
            pem_type = X509Parser.pem_data_type(data)
            # It is in PEM format with the target tag
            if pem_type == tag:
                return pem_loader(data)
            # It is in PEM format without the target tag
            if pem_type:
                return None
            # It might be in DER format
            try:
                result = der_loader(data)
            except ValueError:
                result = None
            return result
        wrapper.__name__ = "{}.parser[{}]".format(type(self).__name__, tag)
        return wrapper

    def __generate_parsers(self):
        """Generate parsers for all supported DataTypes"""
        self.parsers = {}
        for data_type, _ in self.PEM_TAGS.items():
            self.parsers[data_type] = self.__generate_parser(data_type)

    def __getitem__(self, item):
        return self.parsers[item]

    @staticmethod
    def pem_data_type(data: bytes) -> typing.Optional[str]:
        """Get the tag from data in PEM format

        :param data: data to be checked in binary mode.
        :return: the PEM tag, or None when no tag is detected.
        """
        m = re.search(X509Parser.PEM_TAG_REGEX, data)
        if m is not None:
            return m.group('type').decode('UTF-8')
        else:
            return None
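    # For example (hypothetical input): pem_data_type(b'-----BEGIN X509 CRL-----\n')
    # returns 'X509 CRL', while raw DER bytes, which carry no PEM tag, yield None.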

    @staticmethod
    def check_hex_string(hex_str: str) -> bool:
        """Check if the hex string is possibly DER data."""
        hex_len = len(hex_str)
        # At least 6 hex chars for 3 bytes: Type + Length + Content
        if hex_len < 6:
            return False
        # Check if the Type (1 byte) is SEQUENCE.
        if hex_str[0:2] != '30':
            return False
        # Check the LENGTH (1 byte) value
        content_len = int(hex_str[2:4], base=16)
        consumed = 4
        if content_len in (128, 255):
            # Indefinite or Reserved
            return False
        elif content_len > 127:
            # Definite, Long
            length_len = (content_len - 128) * 2
            content_len = int(hex_str[consumed:consumed+length_len], base=16)
            consumed += length_len
        # Check the LENGTH
        if hex_len != content_len * 2 + consumed:
            return False
        return True
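
# As a worked example of the length check above (hypothetical DER header, not
# part of the original script): 0x30 0x82 0x01 0xf4 is a SEQUENCE with a
# long-form length field (2 length bytes) announcing 0x01f4 = 500 content
# bytes, so the full encoding is 4 + 500 = 504 bytes, i.e. 1008 hex characters.
assert X509Parser.check_hex_string('308201f4' + '00' * 500)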

class Auditor:
    """
    A base class that uses X509Parser to parse files into a list of AuditData.

    A subclass must implement the following methods:
    - collect_default_files: Return a list of file names that are used by
      default for parsing (auditing). The list will be stored in
      Auditor.default_files.
    - parse_file: Method that parses a single file into a list of AuditData.

    A subclass may override the following methods:
    - parse_bytes: By default, it parses `bytes` containing exactly one valid
      piece of X.509 data (in DER/PEM format) into an X.509 object.
    - walk_all: By default, it iterates over all the files in the provided
      file name list, calls `parse_file` for each file and stores the results
      by extending the `results` passed to the function.
    """
    def __init__(self, logger):
        self.logger = logger
        self.default_files = self.collect_default_files()
        self.parser = X509Parser({
            DataType.CRT: {
                DataFormat.PEM: x509.load_pem_x509_certificate,
                DataFormat.DER: x509.load_der_x509_certificate
            },
            DataType.CRL: {
                DataFormat.PEM: x509.load_pem_x509_crl,
                DataFormat.DER: x509.load_der_x509_crl
            },
            DataType.CSR: {
                DataFormat.PEM: x509.load_pem_x509_csr,
                DataFormat.DER: x509.load_der_x509_csr
            },
        })

    def collect_default_files(self) -> typing.List[str]:
        """Collect the default files for parsing."""
        raise NotImplementedError

    def parse_file(self, filename: str) -> typing.List[AuditData]:
        """
        Parse a list of AuditData from a file.

        :param filename: name of the file to parse.
        :return: list of AuditData parsed from the file.
        """
        raise NotImplementedError

    def parse_bytes(self, data: bytes):
        """Parse AuditData from bytes."""
        for data_type in list(DataType):
            try:
                result = self.parser[data_type](data)
            except ValueError as val_error:
                result = None
                self.logger.warning(val_error)
            if result is not None:
                audit_data = AuditData(data_type, result)
                return audit_data
        return None

    def walk_all(self,
                 results: typing.Dict[str, AuditData],
                 file_list: typing.Optional[typing.List[str]] = None) -> None:
        """
        Iterate over all the files in the list and collect audit data. The
        results are written to the `results` dictionary passed to this
        function.

        :param results: The dictionary used to store the parsed
                        AuditData. The keys of this dictionary should
                        be the identifiers of the AuditData.
        """
        if file_list is None:
            file_list = self.default_files
        for filename in file_list:
            data_list = self.parse_file(filename)
            for d in data_list:
                if d.identifier in results:
                    results[d.identifier].locations.extend(d.locations)
                else:
                    results[d.identifier] = d

    @staticmethod
    def find_test_dir():
        """Get the relative path of the Mbed TLS test directory."""
        return os.path.relpath(build_tree.guess_mbedtls_root() + '/tests')


class TestDataAuditor(Auditor):
    """Class for auditing files in `tests/data_files/`"""

    def collect_default_files(self):
        """Collect all files in `tests/data_files/`"""
        test_dir = self.find_test_dir()
        test_data_glob = os.path.join(test_dir, 'data_files/**')
        data_files = [f for f in glob.glob(test_data_glob, recursive=True)
                      if os.path.isfile(f)]
        return data_files

    def parse_file(self, filename: str) -> typing.List[AuditData]:
        """
        Parse a list of AuditData from a data file.

        :param filename: name of the file to parse.
        :return: list of AuditData parsed from the file.
        """
        with open(filename, 'rb') as f:
            data = f.read()

        results = []
        # Try to parse all PEM blocks.
        is_pem = False
        for idx, m in enumerate(re.finditer(X509Parser.PEM_REGEX, data, flags=re.S), 1):
            is_pem = True
            result = self.parse_bytes(data[m.start():m.end()])
            if result is not None:
                result.locations.append("{}#{}".format(filename, idx))
                results.append(result)

        # Might be in DER format.
        if not is_pem:
            result = self.parse_bytes(data)
            if result is not None:
                result.locations.append("{}".format(filename))
                results.append(result)

        return results

def parse_suite_data(data_f):
    """
    Parse a .data file for test arguments that possibly contain
    valid X.509 data. If you need a more precise parser, please
    use generate_test_code.parse_test_data instead.

    :param data_f: file object of the data file.
    :return: Generator that yields test function argument lists.
    """
    for line in data_f:
        line = line.strip()
        # Skip comments
        if line.startswith('#'):
            continue

        # Check for a parameters line
        match = re.search(r'\A\w+(.*:)?\"', line)
        if match:
            # Read test vectors
            parts = re.split(r'(?<!\\):', line)
            parts = [x for x in parts if x]
            args = parts[1:]
            yield args
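
# For example, a hypothetical .data line (invented for illustration) splits
# into its arguments as follows: the split on unescaped ':' drops the
# function name and keeps the quoted hex string and remaining arguments.
_example_line = 'x509parse_crt:"3003020105":0'
_example_parts = [x for x in re.split(r'(?<!\\):', _example_line) if x]
assert _example_parts[1:] == ['"3003020105"', '0']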

class SuiteDataAuditor(Auditor):
    """Class for auditing files in `tests/suites/*.data`"""

    def collect_default_files(self):
        """Collect all files in `tests/suites/*.data`"""
        test_dir = self.find_test_dir()
        suites_data_folder = os.path.join(test_dir, 'suites')
        data_files = glob.glob(os.path.join(suites_data_folder, '*.data'))
        return data_files

    def parse_file(self, filename: str):
        """
        Parse a list of AuditData from a test suite data file.

        :param filename: name of the file to parse.
        :return: list of AuditData parsed from the file.
        """
        audit_data_list = []
        data_f = FileWrapper(filename)
        for test_args in parse_suite_data(data_f):
            for idx, test_arg in enumerate(test_args):
                match = re.match(r'"(?P<data>[0-9a-fA-F]+)"', test_arg)
                if not match:
                    continue
                if not X509Parser.check_hex_string(match.group('data')):
                    continue
                audit_data = self.parse_bytes(bytes.fromhex(match.group('data')))
                if audit_data is None:
                    continue
                audit_data.locations.append("{}:{}:#{}".format(filename,
                                                               data_f.line_no,
                                                               idx + 1))
                audit_data_list.append(audit_data)

        return audit_data_list


def list_all(audit_data: AuditData):
    for loc in audit_data.locations:
        print("{}\t{:20}\t{:20}\t{:3}\t{}".format(
            audit_data.identifier,
            audit_data.not_valid_before.isoformat(timespec='seconds'),
            audit_data.not_valid_after.isoformat(timespec='seconds'),
            audit_data.data_type.name,
            loc))


def main():
    """
    Parse the command line arguments and run the audit.
    """
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('-a', '--all',
                        action='store_true',
                        help='list the information of all the files')
    parser.add_argument('-v', '--verbose',
                        action='store_true', dest='verbose',
                        help='show logs')
    parser.add_argument('--from', dest='start_date',
                        help=('Start of desired validity period (UTC, YYYY-MM-DD). '
                              'Default: today'),
                        metavar='DATE')
    parser.add_argument('--to', dest='end_date',
                        help=('End of desired validity period (UTC, YYYY-MM-DD). '
                              'Default: --from'),
                        metavar='DATE')
    parser.add_argument('--data-files', action='append', nargs='*',
                        help='data files to audit',
                        metavar='FILE')
    parser.add_argument('--suite-data-files', action='append', nargs='*',
                        help='suite data files to audit',
                        metavar='FILE')

    args = parser.parse_args()

    # start the main routine
    # set up the logger
    logger = logging.getLogger()
    logging_util.configure_logger(logger)
    logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR)

    td_auditor = TestDataAuditor(logger)
    sd_auditor = SuiteDataAuditor(logger)

    data_files = []
    suite_data_files = []
    if args.data_files is None and args.suite_data_files is None:
        data_files = td_auditor.default_files
        suite_data_files = sd_auditor.default_files
    else:
        if args.data_files is not None:
            data_files = [x for l in args.data_files for x in l]
        if args.suite_data_files is not None:
            suite_data_files = [x for l in args.suite_data_files for x in l]

    # validity period start date
    if args.start_date:
        start_date = datetime.datetime.fromisoformat(args.start_date)
    else:
        start_date = datetime.datetime.today()
    # validity period end date
    if args.end_date:
        end_date = datetime.datetime.fromisoformat(args.end_date)
    else:
        end_date = start_date

    # go through all the files
    audit_results = {}
    td_auditor.walk_all(audit_results, data_files)
    sd_auditor.walk_all(audit_results, suite_data_files)

    logger.info("Total: {} objects found!".format(len(audit_results)))

    # We filter out the files whose validity duration covers the provided
    # duration.
    filter_func = lambda d: (start_date < d.not_valid_before) or \
                            (d.not_valid_after < end_date)

    sortby_end = lambda d: d.not_valid_after

    if args.all:
        filter_func = None

    # filter and output the results
    for d in sorted(filter(filter_func, audit_results.values()), key=sortby_end):
        list_all(d)

    logger.debug("Done!")

check_cryptography_version()
if __name__ == "__main__":
    main()
250  externals/mbedtls/tests/scripts/basic-build-test.sh  vendored  Executable file
@@ -0,0 +1,250 @@
#!/bin/sh

# basic-build-test.sh
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# Executes the basic test suites, captures the results, and generates a simple
# test report and code coverage report.
#
# The tests include:
#   * Unit tests                - executed using tests/scripts/run-test-suites.pl
#   * Self-tests                - executed using the test suites above
#   * System tests              - executed using tests/ssl-opt.sh
#   * Interoperability tests    - executed using tests/compat.sh
#
# The tests focus on functionality and do not consider performance.
#
# Note the tests self-adapt due to configurations in include/mbedtls/mbedtls_config.h
# which can lead to some tests being skipped, and can cause the number of
# available tests to fluctuate.
#
# This script has been written to be generic and should work on any shell.
#
# Usage: basic-build-test.sh
#

# Abort on errors (and uninitialised variables)
set -eu

if [ -d library -a -d include -a -d tests ]; then :; else
    echo "Must be run from Mbed TLS root" >&2
    exit 1
fi

: ${OPENSSL:="openssl"}
: ${GNUTLS_CLI:="gnutls-cli"}
: ${GNUTLS_SERV:="gnutls-serv"}

# Used to make ssl-opt.sh deterministic.
#
# See also RELEASE_SEED in all.sh. Debugging is easier if both values are kept
# in sync. If you change the value here because it breaks some tests, you'll
# definitely want to change it in all.sh as well.
: ${SEED:=1}
export SEED

# If MAKEFLAGS is not set, add the -j option to speed up invocations of make.
if [ -z "${MAKEFLAGS+set}" ]; then
    export MAKEFLAGS="-j"
fi

# To avoid setting OpenSSL and GnuTLS for each call to compat.sh and ssl-opt.sh
# we just export the variables they require.
export OPENSSL="$OPENSSL"
export GNUTLS_CLI="$GNUTLS_CLI"
export GNUTLS_SERV="$GNUTLS_SERV"

CONFIG_H='include/mbedtls/mbedtls_config.h'
CONFIG_BAK="$CONFIG_H.bak"

# Step 0 - print build environment info
OPENSSL="$OPENSSL" \
    GNUTLS_CLI="$GNUTLS_CLI" \
    GNUTLS_SERV="$GNUTLS_SERV" \
    scripts/output_env.sh
echo

# Step 1 - Make an instrumented build for code coverage
export CFLAGS=' --coverage -g3 -O0 '
export LDFLAGS=' --coverage'
make clean
cp "$CONFIG_H" "$CONFIG_BAK"
scripts/config.py full
make


# Step 2 - Execute the tests
TEST_OUTPUT=out_${PPID}
cd tests
if [ ! -f "seedfile" ]; then
    dd if=/dev/urandom of="seedfile" bs=64 count=1
fi
echo

# Step 2a - Unit Tests (keep going even if some tests fail)
echo '################ Unit tests ################'
perl scripts/run-test-suites.pl -v 2 |tee unit-test-$TEST_OUTPUT
echo '^^^^^^^^^^^^^^^^ Unit tests ^^^^^^^^^^^^^^^^'
echo

# Step 2b - System Tests (keep going even if some tests fail)
echo
echo '################ ssl-opt.sh ################'
echo "ssl-opt.sh will use SEED=$SEED for udp_proxy"
sh ssl-opt.sh |tee sys-test-$TEST_OUTPUT
echo '^^^^^^^^^^^^^^^^ ssl-opt.sh ^^^^^^^^^^^^^^^^'
echo

# Step 2c - Compatibility tests (keep going even if some tests fail)
echo '################ compat.sh ################'
{
    echo '#### compat.sh: Default versions'
    sh compat.sh
    echo

    echo '#### compat.sh: null cipher'
    sh compat.sh -e '^$' -f 'NULL'
    echo

    echo '#### compat.sh: next (ARIA, ChaCha)'
    OPENSSL="$OPENSSL_NEXT" sh compat.sh -e '^$' -f 'ARIA\|CHACHA'
    echo
} | tee compat-test-$TEST_OUTPUT
echo '^^^^^^^^^^^^^^^^ compat.sh ^^^^^^^^^^^^^^^^'
echo

# Step 3 - Process the coverage report
cd ..
{
    make lcov
    echo SUCCESS
} | tee tests/cov-$TEST_OUTPUT

if [ "$(tail -n1 tests/cov-$TEST_OUTPUT)" != "SUCCESS" ]; then
    echo >&2 "Fatal: 'make lcov' failed"
    exit 2
fi


# Step 4 - Summarise the test report
echo
echo "========================================================================="
echo "Test Report Summary"
echo

# A failure of the left-hand side of a pipe is ignored (this is a limitation
# of sh). We'll use the presence of this file as a marker that the generation
# of the report succeeded.
rm -f "tests/basic-build-test-$$.ok"

{

    cd tests

    # Step 4a - Unit tests
    echo "Unit tests - tests/scripts/run-test-suites.pl"

    PASSED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/test cases passed :[\t]*\([0-9]*\)/\1/p'| tr -d ' ')
    SKIPPED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/skipped :[ \t]*\([0-9]*\)/\1/p'| tr -d ' ')
    TOTAL_SUITES=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) .*, [0-9]* tests run)/\1/p'| tr -d ' ')
    FAILED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/failed :[\t]*\([0-9]*\)/\1/p' |tr -d ' ')

    echo "No test suites     : $TOTAL_SUITES"
    echo "Passed             : $PASSED_TESTS"
    echo "Failed             : $FAILED_TESTS"
    echo "Skipped            : $SKIPPED_TESTS"
    echo "Total exec'd tests : $(($PASSED_TESTS + $FAILED_TESTS))"
    echo "Total avail tests  : $(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))"
    echo

    TOTAL_PASS=$PASSED_TESTS
    TOTAL_FAIL=$FAILED_TESTS
    TOTAL_SKIP=$SKIPPED_TESTS
    TOTAL_AVAIL=$(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))
    TOTAL_EXED=$(($PASSED_TESTS + $FAILED_TESTS))

    # Step 4b - TLS Options tests
    echo "TLS Options tests - tests/ssl-opt.sh"

    PASSED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p')
    SKIPPED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p')
    TOTAL_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p')
    FAILED_TESTS=$(($TOTAL_TESTS - $PASSED_TESTS))

    echo "Passed             : $PASSED_TESTS"
    echo "Failed             : $FAILED_TESTS"
    echo "Skipped            : $SKIPPED_TESTS"
    echo "Total exec'd tests : $TOTAL_TESTS"
    echo "Total avail tests  : $(($TOTAL_TESTS + $SKIPPED_TESTS))"
    echo

    TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
    TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
    TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
    TOTAL_AVAIL=$(($TOTAL_AVAIL + $TOTAL_TESTS + $SKIPPED_TESTS))
    TOTAL_EXED=$(($TOTAL_EXED + $TOTAL_TESTS))


    # Step 4c - System Compatibility tests
    echo "System/Compatibility tests - tests/compat.sh"

    PASSED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
    SKIPPED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
    EXED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
    FAILED_TESTS=$(($EXED_TESTS - $PASSED_TESTS))

    echo "Passed             : $PASSED_TESTS"
    echo "Failed             : $FAILED_TESTS"
    echo "Skipped            : $SKIPPED_TESTS"
    echo "Total exec'd tests : $EXED_TESTS"
    echo "Total avail tests  : $(($EXED_TESTS + $SKIPPED_TESTS))"
    echo

    TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
    TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
    TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
    TOTAL_AVAIL=$(($TOTAL_AVAIL + $EXED_TESTS + $SKIPPED_TESTS))
    TOTAL_EXED=$(($TOTAL_EXED + $EXED_TESTS))


    # Step 4d - Grand totals
    echo "-------------------------------------------------------------------------"
    echo "Total tests"

    echo "Total Passed       : $TOTAL_PASS"
    echo "Total Failed       : $TOTAL_FAIL"
    echo "Total Skipped      : $TOTAL_SKIP"
    echo "Total exec'd tests : $TOTAL_EXED"
    echo "Total avail tests  : $TOTAL_AVAIL"
    echo


    # Step 4e - Coverage report
    echo "Coverage statistics:"
    sed -n '1,/^Overall coverage/d; /%/p' cov-$TEST_OUTPUT
    echo

    rm unit-test-$TEST_OUTPUT
    rm sys-test-$TEST_OUTPUT
    rm compat-test-$TEST_OUTPUT
    rm cov-$TEST_OUTPUT

    # Mark the report generation as having succeeded. This must be the
    # last thing in the report generation.
    touch "basic-build-test-$$.ok"
} | tee coverage-summary.txt

make clean

if [ -f "$CONFIG_BAK" ]; then
    mv "$CONFIG_BAK" "$CONFIG_H"
fi

# The file must exist; otherwise it means something went wrong while generating
# the coverage report. If something did go wrong, rm will complain, so this
# script will exit with a failure status.
rm "tests/basic-build-test-$$.ok"
37  externals/mbedtls/tests/scripts/basic-in-docker.sh  vendored  Executable file
@@ -0,0 +1,37 @@
#!/bin/bash -eu

# basic-in-docker.sh
#
# Purpose
# -------
# This runs sanity checks and library tests in a Docker container. The tests
# are run for both clang and gcc. The testing includes a full test run
# in the default configuration, partial test runs in the reference
# configurations, and some dependency tests.
#
# WARNING: the Dockerfile used by this script is no longer maintained! See
# https://github.com/Mbed-TLS/mbedtls-test/blob/master/README.md#quick-start
# for the set of Docker images we use on the CI.
#
# Notes for users
# ---------------
# See docker_env.sh for prerequisites and other information.

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

source tests/scripts/docker_env.sh

run_in_docker tests/scripts/all.sh 'check_*'

for compiler in clang gcc; do
    run_in_docker -e CC=${compiler} cmake -D CMAKE_BUILD_TYPE:String="Check" .
    run_in_docker -e CC=${compiler} make
    run_in_docker -e CC=${compiler} make test
    run_in_docker programs/test/selftest
    run_in_docker -e OSSL_NO_DTLS=1 tests/compat.sh
    run_in_docker tests/ssl-opt.sh -e '\(DTLS\|SCSV\).*openssl'
    run_in_docker tests/scripts/test-ref-configs.pl
    run_in_docker tests/scripts/depends.py curves
    run_in_docker tests/scripts/depends.py kex
done
67
externals/mbedtls/tests/scripts/check-doxy-blocks.pl
vendored
Executable file
@@ -0,0 +1,67 @@
#!/usr/bin/env perl

# Detect comment blocks that are likely meant to be doxygen blocks but aren't.
#
# More precisely, look for a normal comment block containing '\'.
# Of course one could use doxygen warnings, e.g. with:
# sed -e '/EXTRACT/s/YES/NO/' doxygen/mbedtls.doxyfile | doxygen -
# but that would warn about any undocumented item, while our goal is to find
# items that are documented, but not marked as such by mistake.
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use warnings;
use strict;
use File::Basename;

# C/header files in the following directories will be checked
my @directories = qw(include/mbedtls library doxygen/input);

# very naive pattern to find directives:
# everything with a backslash except '\0' and a backslash at EOL
my $doxy_re = qr/\\(?!0|\n)/;
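# For illustration (an assumed typical input, not upstream text): a comment
# line like " * \param buf   The input buffer" matches $doxy_re because of
# the "\param" directive, while " * see file C:\0" and a backslash used for
# line continuation at the end of a line do not.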

# Return an error code to the environment if a potential error in the
# source code is found.
my $exit_code = 0;

sub check_file {
    my ($fname) = @_;
    open my $fh, '<', $fname or die "Failed to open '$fname': $!\n";

    # first line of the last normal comment block,
    # or 0 if not in a normal comment block
    my $block_start = 0;
    while (my $line = <$fh>) {
        $block_start = $. if $line =~ m/\/\*(?![*!])/;
        $block_start = 0 if $line =~ m/\*\//;
        if ($block_start and $line =~ m/$doxy_re/) {
            print "$fname:$block_start: directive on line $.\n";
            $block_start = 0; # report only one directive per block
            $exit_code = 1;
        }
    }

    close $fh;
}

sub check_dir {
    my ($dirname) = @_;
    for my $file (<$dirname/*.[ch]>) {
        check_file($file);
    }
}

# Check that the script is being run from the project's root directory.
for my $dir (@directories) {
    if (! -d $dir) {
        die "This script must be run from the Mbed TLS root directory";
    } else {
        check_dir($dir)
    }
}

exit $exit_code;

__END__
146
externals/mbedtls/tests/scripts/check-generated-files.sh
vendored
Executable file
@@ -0,0 +1,146 @@
#! /usr/bin/env sh

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# Check if generated files are up-to-date.

set -eu

if [ $# -ne 0 ] && [ "$1" = "--help" ]; then
    cat <<EOF
$0 [-l | -u]
This script checks that all generated files are up-to-date. If some aren't, by
default the script reports it and exits in error; with the -u option, it just
updates them instead.

  -u    Update the files rather than return an error for out-of-date files.
  -l    List generated files, but do not update them.
EOF
    exit
fi

in_mbedtls_repo () {
    test -d include -a -d library -a -d programs -a -d tests
}

in_tf_psa_crypto_repo () {
    test -d include -a -d core -a -d drivers -a -d programs -a -d tests
}

if in_mbedtls_repo; then
    library_dir='library'
elif in_tf_psa_crypto_repo; then
    library_dir='core'
else
    echo "Must be run from Mbed TLS root or TF-PSA-Crypto root" >&2
    exit 1
fi

UPDATE=
LIST=
while getopts lu OPTLET; do
    case $OPTLET in
        l) LIST=1;;
        u) UPDATE=1;;
    esac
done

# check SCRIPT FILENAME[...]
# check SCRIPT DIRECTORY
# Run SCRIPT and check that it does not modify any of the specified files.
# In the first form, there can be any number of FILENAMEs, which must be
# regular files.
# In the second form, there must be a single DIRECTORY, standing for the
# list of files in the directory. Running SCRIPT must not modify any file
# in the directory and must not add or remove files either.
# If $UPDATE is empty, abort with an error status if a file is modified.
check()
{
    SCRIPT=$1
    shift

    if [ -n "$LIST" ]; then
        printf '%s\n' "$@"
        return
    fi

    directory=
    if [ -d "$1" ]; then
        directory="$1"
        rm -f "$directory"/*.bak
        set -- "$1"/*
    fi

    for FILE in "$@"; do
        if [ -e "$FILE" ]; then
            cp -p "$FILE" "$FILE.bak"
        else
            rm -f "$FILE.bak"
        fi
    done

    "$SCRIPT"

    # Compare the script output to the old files and remove backups
    for FILE in "$@"; do
        if diff "$FILE" "$FILE.bak" >/dev/null 2>&1; then
            # Move the original file back so that $FILE's timestamp doesn't
            # change (avoids spurious rebuilds with make).
            mv "$FILE.bak" "$FILE"
        else
            echo "'$FILE' was either modified or deleted by '$SCRIPT'"
            if [ -z "$UPDATE" ]; then
                exit 1
            else
                rm -f "$FILE.bak"
            fi
        fi
    done

    if [ -n "$directory" ]; then
        old_list="$*"
        set -- "$directory"/*
        new_list="$*"
        # Check if there are any new files
        if [ "$old_list" != "$new_list" ]; then
            echo "Files were deleted or created by '$SCRIPT'"
            echo "Before: $old_list"
            echo "After: $new_list"
            if [ -z "$UPDATE" ]; then
                exit 1
            fi
        fi
    fi
}
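# For illustration, using invocations that appear below: "check
# scripts/generate_errors.pl library/error.c" re-runs the generator and fails
# (or updates, with -u) if library/error.c changes, while the directory form
# "check scripts/generate_visualc_files.pl visualc/VS2013" additionally fails
# if files appear in or disappear from visualc/VS2013.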

# Note: if the format of calls to the "check" function changes, update
# scripts/code_style.py accordingly. For generated C source files (*.h or *.c),
# the format must be "check SCRIPT FILENAME...". For other source files,
# any shell syntax is permitted (including e.g. command substitution).

# Note: Instructions to generate those files are replicated in:
#   - **/Makefile (to (re)build them with make)
#   - **/CMakeLists.txt (to (re)build them with cmake)
#   - scripts/make_generated_files.bat (to generate them under Windows)

# These checks are common to Mbed TLS and TF-PSA-Crypto
check scripts/generate_psa_constants.py programs/psa/psa_constant_names_generated.c
check tests/scripts/generate_bignum_tests.py $(tests/scripts/generate_bignum_tests.py --list)
check tests/scripts/generate_ecp_tests.py $(tests/scripts/generate_ecp_tests.py --list)
check tests/scripts/generate_psa_tests.py $(tests/scripts/generate_psa_tests.py --list)
check scripts/generate_driver_wrappers.py $library_dir/psa_crypto_driver_wrappers.h $library_dir/psa_crypto_driver_wrappers_no_static.c

# Additional checks for Mbed TLS only
if in_mbedtls_repo; then
    check scripts/generate_errors.pl library/error.c
    check scripts/generate_query_config.pl programs/test/query_config.c
    check scripts/generate_features.pl library/version_features.c
    check scripts/generate_ssl_debug_helpers.py library/ssl_debug_helpers_generated.c
    # generate_visualc_files enumerates source files (library/*.c). It doesn't
    # care about their content, but the files must exist. So it must run after
    # the step that creates or updates these files.
    check scripts/generate_visualc_files.pl visualc/VS2013
fi
68
externals/mbedtls/tests/scripts/check-python-files.sh
vendored
Executable file
@@ -0,0 +1,68 @@
#! /usr/bin/env sh

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

# Purpose: check Python files for potential programming errors or maintenance
# hurdles. Run pylint to detect some potential mistakes and enforce PEP8
# coding standards. Run mypy to perform static type checking.

# We'll keep going on errors and report the status at the end.
ret=0

if type python3 >/dev/null 2>/dev/null; then
    PYTHON=python3
else
    PYTHON=python
fi

check_version () {
    $PYTHON - "$2" <<EOF
import packaging.version
import sys
import $1 as package
actual = package.__version__
wanted = sys.argv[1]
if packaging.version.parse(actual) < packaging.version.parse(wanted):
    sys.stderr.write("$1: version %s is too old (want %s)\n" % (actual, wanted))
    exit(1)
EOF
}
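# For illustration (hypothetical call): "check_version pylint 1.8.3" runs the
# embedded Python snippet with sys.argv[1] set to "1.8.3". It succeeds
# silently when pylint.__version__ is at least 1.8.3, and prints an error to
# stderr and returns a failure status otherwise.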

can_pylint () {
    # Pylint 1.5.2 from Ubuntu 16.04 is too old:
    # E: 34, 0: Unable to import 'mbedtls_dev' (import-error)
    # Pylint 1.8.3 from Ubuntu 18.04 passed on the first commit containing this line.
    check_version pylint 1.8.3
}

can_mypy () {
    # mypy 0.770 is too old:
    # tests/scripts/test_psa_constant_names.py:34: error: Cannot find implementation or library stub for module named 'mbedtls_dev'
    # mypy 0.780 from pip passed on the first commit containing this line.
    check_version mypy.version 0.780
}

# With just a --can-xxx option, check whether the tool for xxx is available
# with an acceptable version, and exit without running any checks. The exit
# status is true if the tool is available and acceptable and false otherwise.
if [ "$1" = "--can-pylint" ]; then
    can_pylint
    exit
elif [ "$1" = "--can-mypy" ]; then
    can_mypy
    exit
fi

echo 'Running pylint ...'
$PYTHON -m pylint scripts/mbedtls_dev/*.py scripts/*.py tests/scripts/*.py || {
    echo >&2 "pylint reported errors"
    ret=1
}

echo
echo 'Running mypy ...'
$PYTHON -m mypy scripts/*.py tests/scripts/*.py ||
    ret=1

exit $ret
518
externals/mbedtls/tests/scripts/check_files.py
vendored
Executable file
@@ -0,0 +1,518 @@
#!/usr/bin/env python3

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""
This script checks the current state of the source code for minor issues,
including incorrect file permissions, presence of tabs, non-Unix line endings,
trailing whitespace, and presence of UTF-8 BOM.
Note: requires python 3, must be run from Mbed TLS root.
"""

import argparse
import codecs
import inspect
import logging
import os
import re
import subprocess
import sys
try:
    from typing import FrozenSet, Optional, Pattern # pylint: disable=unused-import
except ImportError:
    pass

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import build_tree


class FileIssueTracker:
    """Base class for file-wide issue tracking.

    To implement a checker that processes a file as a whole, inherit from
    this class and implement `check_file_for_issue` and define ``heading``.

    ``suffix_exemptions``: files whose name ends with a string in this set
    will not be checked.

    ``path_exemptions``: files whose path (relative to the root of the source
    tree) matches this regular expression will not be checked. This can be
    ``None`` to match no path. Paths are normalized and converted to ``/``
    separators before matching.

    ``heading``: human-readable description of the issue
    """

    suffix_exemptions = frozenset() #type: FrozenSet[str]
    path_exemptions = None #type: Optional[Pattern[str]]
    # heading must be defined in derived classes.
    # pylint: disable=no-member

    def __init__(self):
        self.files_with_issues = {}

    @staticmethod
    def normalize_path(filepath):
        """Normalize ``filepath`` with / as the directory separator."""
        filepath = os.path.normpath(filepath)
        # On Windows, we may have backslashes to separate directories.
        # We need slashes to match exemption lists.
        seps = os.path.sep
        if os.path.altsep is not None:
            seps += os.path.altsep
        return '/'.join(filepath.split(seps))

    def should_check_file(self, filepath):
        """Whether the given file name should be checked.

        Files whose name ends with a string listed in ``self.suffix_exemptions``
        or whose path matches ``self.path_exemptions`` will not be checked.
        """
        for files_exemption in self.suffix_exemptions:
            if filepath.endswith(files_exemption):
                return False
        if self.path_exemptions and \
           re.match(self.path_exemptions, self.normalize_path(filepath)):
            return False
        return True

    def check_file_for_issue(self, filepath):
        """Check the specified file for the issue that this class is for.

        Subclasses must implement this method.
        """
        raise NotImplementedError

    def record_issue(self, filepath, line_number):
        """Record that an issue was found at the specified location."""
        if filepath not in self.files_with_issues.keys():
            self.files_with_issues[filepath] = []
        self.files_with_issues[filepath].append(line_number)

    def output_file_issues(self, logger):
        """Log all the locations where the issue was found."""
        if self.files_with_issues.values():
            logger.info(self.heading)
            for filename, lines in sorted(self.files_with_issues.items()):
                if lines:
                    logger.info("{}: {}".format(
                        filename, ", ".join(str(x) for x in lines)
                    ))
                else:
                    logger.info(filename)
            logger.info("")

BINARY_FILE_PATH_RE_LIST = [
    r'docs/.*\.pdf\Z',
    r'docs/.*\.png\Z',
    r'programs/fuzz/corpuses/[^.]+\Z',
    r'tests/data_files/[^.]+\Z',
    r'tests/data_files/.*\.(crt|csr|db|der|key|pubkey)\Z',
    r'tests/data_files/.*\.req\.[^/]+\Z',
    r'tests/data_files/.*malformed[^/]+\Z',
    r'tests/data_files/format_pkcs12\.fmt\Z',
    r'tests/data_files/.*\.bin\Z',
]
BINARY_FILE_PATH_RE = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST))

class LineIssueTracker(FileIssueTracker):
    """Base class for line-by-line issue tracking.

    To implement a checker that processes files line by line, inherit from
    this class and implement `line_with_issue`.
    """

    # Exclude binary files.
    path_exemptions = BINARY_FILE_PATH_RE

    def issue_with_line(self, line, filepath, line_number):
        """Check the specified line for the issue that this class is for.

        Subclasses must implement this method.
        """
        raise NotImplementedError

    def check_file_line(self, filepath, line, line_number):
        if self.issue_with_line(line, filepath, line_number):
            self.record_issue(filepath, line_number)

    def check_file_for_issue(self, filepath):
        """Check the lines of the specified file.

        Subclasses must implement the ``issue_with_line`` method.
        """
        with open(filepath, "rb") as f:
            for i, line in enumerate(iter(f.readline, b"")):
                self.check_file_line(filepath, line, i + 1)


def is_windows_file(filepath):
    _root, ext = os.path.splitext(filepath)
    return ext in ('.bat', '.dsp', '.dsw', '.sln', '.vcxproj')


class ShebangIssueTracker(FileIssueTracker):
    """Track files with a bad, missing or extraneous shebang line.

    Executable scripts must start with a valid shebang (#!) line.
    """

    heading = "Invalid shebang line:"

    # Allow either /bin/sh, /bin/bash, or /usr/bin/env.
    # Allow at most one argument (this is a Linux limitation).
    # For sh and bash, the argument if present must be options.
    # For env, the argument must be the base name of the interpreter.
    _shebang_re = re.compile(rb'^#! ?(?:/bin/(bash|sh)(?: -[^\n ]*)?'
                             rb'|/usr/bin/env ([^\n /]+))$')
    _extensions = {
        b'bash': 'sh',
        b'perl': 'pl',
        b'python3': 'py',
        b'sh': 'sh',
    }
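
    # For illustration (an assumption about typical inputs, not upstream
    # text): "#!/usr/bin/env python3" at the top of an executable foo.py is
    # accepted, while "#!/usr/bin/python3", a python3 shebang on a .sh file,
    # or a shebang on a non-executable file would all be flagged.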

    def is_valid_shebang(self, first_line, filepath):
        m = re.match(self._shebang_re, first_line)
        if not m:
            return False
        interpreter = m.group(1) or m.group(2)
        if interpreter not in self._extensions:
            return False
        if not filepath.endswith('.' + self._extensions[interpreter]):
            return False
        return True

    def check_file_for_issue(self, filepath):
        is_executable = os.access(filepath, os.X_OK)
        with open(filepath, "rb") as f:
            first_line = f.readline()
        if first_line.startswith(b'#!'):
            if not is_executable:
                # Shebang on a non-executable file
                self.files_with_issues[filepath] = None
            elif not self.is_valid_shebang(first_line, filepath):
                self.files_with_issues[filepath] = [1]
        elif is_executable:
            # Executable without a shebang
            self.files_with_issues[filepath] = None


class EndOfFileNewlineIssueTracker(FileIssueTracker):
    """Track files that end with an incomplete line
    (no newline character at the end of the last line)."""

    heading = "Missing newline at end of file:"

    path_exemptions = BINARY_FILE_PATH_RE

    def check_file_for_issue(self, filepath):
        with open(filepath, "rb") as f:
            try:
                f.seek(-1, 2)
            except OSError:
                # This script only works on regular files. If we can't seek
                # 1 before the end, it means that this position is before
                # the beginning of the file, i.e. that the file is empty.
                return
            if f.read(1) != b"\n":
                self.files_with_issues[filepath] = None


class Utf8BomIssueTracker(FileIssueTracker):
    """Track files that start with a UTF-8 BOM.
    Files should be ASCII or UTF-8. Valid UTF-8 does not start with a BOM."""

    heading = "UTF-8 BOM present:"

    suffix_exemptions = frozenset([".vcxproj", ".sln"])
    path_exemptions = BINARY_FILE_PATH_RE

    def check_file_for_issue(self, filepath):
        with open(filepath, "rb") as f:
            if f.read().startswith(codecs.BOM_UTF8):
                self.files_with_issues[filepath] = None


class UnicodeIssueTracker(LineIssueTracker):
    """Track lines with invalid characters or invalid text encoding."""

    heading = "Invalid UTF-8 or forbidden character:"

    # Only allow valid UTF-8, and only other explicitly allowed characters.
    # We deliberately exclude all characters that aren't a simple non-blank,
    # non-zero-width glyph, apart from a very small set (tab, ordinary space,
    # line breaks, "basic" no-break space and soft hyphen). In particular,
    # non-ASCII control characters, combining characters, and Unicode state
    # changes (e.g. right-to-left text) are forbidden.
    # Note that we do allow some characters with a risk of visual confusion,
    # for example '-' (U+002D HYPHEN-MINUS) vs '­' (U+00AD SOFT HYPHEN) vs
    # '‐' (U+2010 HYPHEN), or 'A' (U+0041 LATIN CAPITAL LETTER A) vs
    # 'Α' (U+0391 GREEK CAPITAL LETTER ALPHA).
    GOOD_CHARACTERS = ''.join([
        '\t\n\r -~', # ASCII (tabs and line endings are checked separately)
        '\u00A0-\u00FF', # Latin-1 Supplement (for NO-BREAK SPACE and punctuation)
        '\u2010-\u2027\u2030-\u205E', # General Punctuation (printable)
        '\u2070\u2071\u2074-\u208E\u2090-\u209C', # Superscripts and Subscripts
        '\u2190-\u21FF', # Arrows
        '\u2200-\u22FF', # Mathematical Symbols
        '\u2500-\u257F' # Box Drawings characters used in markdown trees
    ])
    # Allow any of the characters and ranges above, and anything classified
    # as a word constituent.
    GOOD_CHARACTERS_RE = re.compile(r'[\w{}]+\Z'.format(GOOD_CHARACTERS))

    def issue_with_line(self, line, _filepath, line_number):
        try:
            text = line.decode('utf-8')
        except UnicodeDecodeError:
            return True
        if line_number == 1 and text.startswith('\uFEFF'):
            # Strip BOM (U+FEFF ZERO WIDTH NO-BREAK SPACE) at the beginning.
            # Which files are allowed to have a BOM is handled in
            # Utf8BomIssueTracker.
            text = text[1:]
        return not self.GOOD_CHARACTERS_RE.match(text)

class UnixLineEndingIssueTracker(LineIssueTracker):
    """Track files with non-Unix line endings (i.e. files with CR)."""

    heading = "Non-Unix line endings:"

    def should_check_file(self, filepath):
        if not super().should_check_file(filepath):
            return False
        return not is_windows_file(filepath)

    def issue_with_line(self, line, _filepath, _line_number):
        return b"\r" in line


class WindowsLineEndingIssueTracker(LineIssueTracker):
    """Track files with non-Windows line endings (i.e. CR or LF not in CRLF)."""

    heading = "Non-Windows line endings:"

    def should_check_file(self, filepath):
        if not super().should_check_file(filepath):
            return False
        return is_windows_file(filepath)

    def issue_with_line(self, line, _filepath, _line_number):
        return not line.endswith(b"\r\n") or b"\r" in line[:-2]


class TrailingWhitespaceIssueTracker(LineIssueTracker):
    """Track lines with trailing whitespace."""

    heading = "Trailing whitespace:"
    suffix_exemptions = frozenset([".dsp", ".md"])

    def issue_with_line(self, line, _filepath, _line_number):
        return line.rstrip(b"\r\n") != line.rstrip()


class TabIssueTracker(LineIssueTracker):
    """Track lines with tabs."""

    heading = "Tabs present:"
    suffix_exemptions = frozenset([
        ".make",
        ".pem", # some openssl dumps have tabs
        ".sln",
        "/Makefile",
        "/Makefile.inc",
        "/generate_visualc_files.pl",
    ])

    def issue_with_line(self, line, _filepath, _line_number):
        return b"\t" in line
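
# For illustration only -- a hypothetical checker, not part of the upstream
# file. A new line-based check would follow the same pattern as the trackers
# above, e.g.:
#
#     class LineLengthIssueTracker(LineIssueTracker):
#         """Track lines that are longer than 100 characters."""
#         heading = "Line too long:"
#         def issue_with_line(self, line, _filepath, _line_number):
#             return len(line.rstrip(b"\r\n")) > 100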


class MergeArtifactIssueTracker(LineIssueTracker):
    """Track lines with merge artifacts.
    These are leftovers from a ``git merge`` that wasn't fully edited."""

    heading = "Merge artifact:"

    def issue_with_line(self, line, _filepath, _line_number):
        # Detect leftover git conflict markers.
        if line.startswith(b'<<<<<<< ') or line.startswith(b'>>>>>>> '):
            return True
        if line.startswith(b'||||||| '): # from merge.conflictStyle=diff3
            return True
        if line.rstrip(b'\r\n') == b'=======' and \
           not _filepath.endswith('.md'):
            return True
        return False


def this_location():
    frame = inspect.currentframe()
    assert frame is not None
    info = inspect.getframeinfo(frame)
    return os.path.basename(info.filename), info.lineno
THIS_FILE_BASE_NAME, LINE_NUMBER_BEFORE_LICENSE_ISSUE_TRACKER = this_location()

class LicenseIssueTracker(LineIssueTracker):
    """Check copyright statements and license indications.

    This class only checks that statements are correct if present. It does
    not enforce the presence of statements in each file.
    """

    heading = "License issue:"

    LICENSE_EXEMPTION_RE_LIST = [
        # Third-party code, other than whitelisted third-party modules,
        # may be under a different license.
        r'3rdparty/(?!(p256-m)/.*)',
        # Documentation explaining the license may have accidental
        # false positives.
        r'(ChangeLog|LICENSE|[-0-9A-Z_a-z]+\.md)\Z',
        # Files imported from TF-M, and not used except in test builds,
        # may be under a different license.
        r'configs/ext/crypto_config_profile_medium\.h\Z',
        r'configs/ext/tfm_mbedcrypto_config_profile_medium\.h\Z',
        r'configs/ext/README\.md\Z',
        # Third-party file.
        r'dco\.txt\Z',
    ]
    path_exemptions = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST +
                                          LICENSE_EXEMPTION_RE_LIST))

    COPYRIGHT_HOLDER = rb'The Mbed TLS Contributors'
    # Catch "Copyright foo", "Copyright (C) foo", "Copyright © foo", etc.
    COPYRIGHT_RE = re.compile(rb'.*\bcopyright\s+((?:\w|\s|[()]|[^ -~])*\w)', re.I)

    SPDX_HEADER_KEY = b'SPDX-License-Identifier'
    LICENSE_IDENTIFIER = b'Apache-2.0 OR GPL-2.0-or-later'
    SPDX_RE = re.compile(br'.*?(' +
                         re.escape(SPDX_HEADER_KEY) +
                         br')(:\s*(.*?)\W*\Z|.*)', re.I)

    LICENSE_MENTION_RE = re.compile(rb'.*(?:' + rb'|'.join([
        rb'Apache License',
        rb'General Public License',
    ]) + rb')', re.I)

    def __init__(self):
        super().__init__()
        # Record what problem was caused. We can't easily report it due to
        # the structure of the script. To be fixed after
        # https://github.com/Mbed-TLS/mbedtls/pull/2506
        self.problem = None

    def issue_with_line(self, line, filepath, line_number):
        #pylint: disable=too-many-return-statements

        # Use endswith() rather than the more correct os.path.basename()
        # because experimentally, it makes a significant difference to
        # the running time.
        if filepath.endswith(THIS_FILE_BASE_NAME) and \
           line_number > LINE_NUMBER_BEFORE_LICENSE_ISSUE_TRACKER:
            # Avoid false positives from the code in this class.
            # Also skip the rest of this file, which is highly unlikely to
            # contain any problematic statements since we put those near the
            # top of files.
            return False

        m = self.COPYRIGHT_RE.match(line)
        if m and m.group(1) != self.COPYRIGHT_HOLDER:
            self.problem = 'Invalid copyright line'
            return True

        m = self.SPDX_RE.match(line)
        if m:
            if m.group(1) != self.SPDX_HEADER_KEY:
                self.problem = 'Misspelled ' + self.SPDX_HEADER_KEY.decode()
                return True
            if not m.group(3):
                self.problem = 'Improperly formatted SPDX license identifier'
                return True
            if m.group(3) != self.LICENSE_IDENTIFIER:
                self.problem = 'Wrong SPDX license identifier'
                return True

        m = self.LICENSE_MENTION_RE.match(line)
        if m:
            self.problem = 'Suspicious license mention'
            return True

        return False


class IntegrityChecker:
    """Sanity-check files under the current directory."""

    def __init__(self, log_file):
        """Instantiate the sanity checker.
        Check files under the current directory.
        Write a report of issues to log_file."""
        build_tree.check_repo_path()
        self.logger = None
        self.setup_logger(log_file)
        self.issues_to_check = [
            ShebangIssueTracker(),
            EndOfFileNewlineIssueTracker(),
            Utf8BomIssueTracker(),
            UnicodeIssueTracker(),
            UnixLineEndingIssueTracker(),
            WindowsLineEndingIssueTracker(),
            TrailingWhitespaceIssueTracker(),
            TabIssueTracker(),
            MergeArtifactIssueTracker(),
            LicenseIssueTracker(),
        ]

    def setup_logger(self, log_file, level=logging.INFO):
        self.logger = logging.getLogger()
        self.logger.setLevel(level)
        if log_file:
            handler = logging.FileHandler(log_file)
            self.logger.addHandler(handler)
        else:
            console = logging.StreamHandler()
            self.logger.addHandler(console)

    @staticmethod
    def collect_files():
        bytes_output = subprocess.check_output(['git', 'ls-files', '-z'])
        bytes_filepaths = bytes_output.split(b'\0')[:-1]
        ascii_filepaths = map(lambda fp: fp.decode('ascii'), bytes_filepaths)
        # Prepend './' to files in the top-level directory so that
        # something like `'/Makefile' in fp` matches in the top-level
        # directory as well as in subdirectories.
        return [fp if os.path.dirname(fp) else os.path.join(os.curdir, fp)
                for fp in ascii_filepaths]

    def check_files(self):
        for issue_to_check in self.issues_to_check:
            for filepath in self.collect_files():
                if issue_to_check.should_check_file(filepath):
                    issue_to_check.check_file_for_issue(filepath)

    def output_issues(self):
        integrity_return_code = 0
        for issue_to_check in self.issues_to_check:
            if issue_to_check.files_with_issues:
                integrity_return_code = 1
                issue_to_check.output_file_issues(self.logger)
        return integrity_return_code


def run_main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-l", "--log_file", type=str, help="path to optional output log",
    )
    check_args = parser.parse_args()
    integrity_check = IntegrityChecker(check_args.log_file)
    integrity_check.check_files()
    return_code = integrity_check.output_issues()
    sys.exit(return_code)


if __name__ == "__main__":
    run_main()
965
externals/mbedtls/tests/scripts/check_names.py
vendored
Executable file
@@ -0,0 +1,965 @@
#!/usr/bin/env python3
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""
This script confirms that the naming of all symbols and identifiers in Mbed TLS
is consistent with the house style and also self-consistent. It only runs
on Linux and macOS since it depends on nm.

It contains two major Python classes, CodeParser and NameChecker. They both have
a comprehensive "run-all" function (comprehensive_parse() and perform_checks())
but the individual functions can also be used for specific needs.

CodeParser makes heavy use of regular expressions to parse the code, and is
dependent on the current code formatting. Many Python C parser libraries require
preprocessed C code, which means no macro parsing. Compiler tools are also not
very helpful when we want the exact location in the original source (which
becomes impossible when e.g. comments are stripped).

NameChecker performs the following checks:

- All exported and available symbols in the library object files are explicitly
  declared in the header files. This uses the nm command.
- All macros, constants, and identifiers (function names, struct names, etc.)
  follow the required regex pattern.
- Typo checking: all words that begin with MBED|PSA exist as macros or constants.

The script returns 0 on success, 1 on test failure, and 2 if there is a script
error. It must be run from Mbed TLS root.
"""

import abc
import argparse
import fnmatch
import glob
import textwrap
import os
import sys
import traceback
import re
import enum
import shutil
import subprocess
import logging

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import build_tree


# Naming patterns to check against. These are defined outside the NameCheck
# class for ease of modification.
PUBLIC_MACRO_PATTERN = r"^(MBEDTLS|PSA)_[0-9A-Z_]*[0-9A-Z]$"
INTERNAL_MACRO_PATTERN = r"^[0-9A-Za-z_]*[0-9A-Z]$"
CONSTANTS_PATTERN = PUBLIC_MACRO_PATTERN
IDENTIFIER_PATTERN = r"^(mbedtls|psa)_[0-9a-z_]*[0-9a-z]$"

class Match(): # pylint: disable=too-few-public-methods
    """
    A class representing a match, together with its found position.

    Fields:
    * filename: the file that the match was in.
    * line: the full line containing the match.
    * line_no: the line number.
    * pos: a tuple of (start, end) positions on the line where the match is.
    * name: the match itself.
    """
    def __init__(self, filename, line, line_no, pos, name):
        # pylint: disable=too-many-arguments
        self.filename = filename
        self.line = line
        self.line_no = line_no
        self.pos = pos
        self.name = name

    def __str__(self):
        """
        Return a formatted code listing representation of the erroneous line.
        """
        gutter = format(self.line_no, "4d")
        underline = self.pos[0] * " " + (self.pos[1] - self.pos[0]) * "^"

        return (
            " {0} |\n".format(" " * len(gutter)) +
            " {0} | {1}".format(gutter, self.line) +
            " {0} | {1}\n".format(" " * len(gutter), underline)
        )

class Problem(abc.ABC): # pylint: disable=too-few-public-methods
    """
    An abstract parent class representing a form of static analysis error.
    It extends an Abstract Base Class, which means it is not instantiable, and
    it also mandates certain abstract methods to be implemented in subclasses.
    """
    # Class variable to control the quietness of all problems
    quiet = False
    def __init__(self):
        self.textwrapper = textwrap.TextWrapper()
        self.textwrapper.width = 80
        self.textwrapper.initial_indent = " > "
        self.textwrapper.subsequent_indent = " "

    def __str__(self):
        """
        Unified string representation method for all Problems.
        """
        if self.__class__.quiet:
            return self.quiet_output()
        return self.verbose_output()

    @abc.abstractmethod
    def quiet_output(self):
        """
        The output when --quiet is enabled.
        """
        pass

    @abc.abstractmethod
    def verbose_output(self):
        """
        The default output with explanation and code snippet if appropriate.
        """
        pass

class SymbolNotInHeader(Problem): # pylint: disable=too-few-public-methods
    """
    A problem that occurs when an exported/available symbol in the object file
    is not explicitly declared in header files. Created with
    NameCheck.check_symbols_declared_in_header()

    Fields:
    * symbol_name: the name of the symbol.
    """
    def __init__(self, symbol_name):
        self.symbol_name = symbol_name
        Problem.__init__(self)

    def quiet_output(self):
        return "{0}".format(self.symbol_name)

    def verbose_output(self):
        return self.textwrapper.fill(
            "'{0}' was found as an available symbol in the output of nm, "
            "however it was not declared in any header files."
            .format(self.symbol_name))

class PatternMismatch(Problem): # pylint: disable=too-few-public-methods
    """
    A problem that occurs when something doesn't match the expected pattern.
    Created with NameCheck.check_match_pattern()

    Fields:
    * pattern: the expected regex pattern
    * match: the Match object in question
    """
    def __init__(self, pattern, match):
        self.pattern = pattern
        self.match = match
        Problem.__init__(self)


    def quiet_output(self):
        return (
            "{0}:{1}:{2}"
            .format(self.match.filename, self.match.line_no, self.match.name)
        )

    def verbose_output(self):
        return self.textwrapper.fill(
            "{0}:{1}: '{2}' does not match the required pattern '{3}'."
            .format(
                self.match.filename,
                self.match.line_no,
                self.match.name,
                self.pattern
            )
        ) + "\n" + str(self.match)

class Typo(Problem): # pylint: disable=too-few-public-methods
    """
    A problem that occurs when a word using MBED or PSA doesn't
    appear to be defined as a constant or an enum value. Created with
    NameCheck.check_for_typos()

    Fields:
    * match: the Match object of the MBED|PSA name in question.
    """
    def __init__(self, match):
        self.match = match
        Problem.__init__(self)

    def quiet_output(self):
        return (
            "{0}:{1}:{2}"
            .format(self.match.filename, self.match.line_no, self.match.name)
        )

    def verbose_output(self):
        return self.textwrapper.fill(
            "{0}:{1}: '{2}' looks like a typo. It was not found in any "
            "macros or any enums. If this is not a typo, put "
            "//no-check-names after it."
            .format(self.match.filename, self.match.line_no, self.match.name)
        ) + "\n" + str(self.match)

class CodeParser():
    """
    Class for retrieving files and parsing the code. This can be used
    independently of the checks that NameChecker performs, for example for
    list_internal_identifiers.py.
    """
    def __init__(self, log):
        self.log = log
        build_tree.check_repo_path()

        # Memo for storing "glob expression": set(filepaths)
        self.files = {}

        # Globally excluded filenames.
        # Note that "*" can match directory separators in exclude lists.
        self.excluded_files = ["*/bn_mul", "*/compat-2.x.h"]

    def comprehensive_parse(self):
        """
        Comprehensive ("default") function to call each parsing function and
        retrieve various elements of the code, together with the source location.

        Returns a dict of parsed item key to the corresponding List of Matches.
        """
        self.log.info("Parsing source code...")
        self.log.debug(
            "The following files are excluded from the search: {}"
            .format(str(self.excluded_files))
        )

        all_macros = {"public": [], "internal": [], "private":[]}
        all_macros["public"] = self.parse_macros([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h"
        ])
        all_macros["internal"] = self.parse_macros([
            "library/*.h",
            "tests/include/test/drivers/*.h",
        ])
        all_macros["private"] = self.parse_macros([
            "library/*.c",
        ])
        enum_consts = self.parse_enum_consts([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "library/*.h",
            "library/*.c",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h"
        ])
        identifiers, excluded_identifiers = self.parse_identifiers([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "library/*.h",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h"
        ], ["3rdparty/p256-m/p256-m/p256-m.h"])
        mbed_psa_words = self.parse_mbed_psa_words([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "library/*.h",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h",
            "library/*.c",
            "3rdparty/everest/library/everest.c",
            "3rdparty/everest/library/x25519.c"
        ], ["library/psa_crypto_driver_wrappers.h"])
        symbols = self.parse_symbols()

        # Remove identifier macros like mbedtls_printf or mbedtls_calloc
        identifiers_justname = [x.name for x in identifiers]
        actual_macros = {"public": [], "internal": []}
        for scope in actual_macros:
            for macro in all_macros[scope]:
                if macro.name not in identifiers_justname:
                    actual_macros[scope].append(macro)

        self.log.debug("Found:")
        # Aligns the counts on the assumption that none exceeds 4 digits
        for scope in actual_macros:
            self.log.debug(" {:4} Total {} Macros"
                           .format(len(all_macros[scope]), scope))
            self.log.debug(" {:4} {} Non-identifier Macros"
                           .format(len(actual_macros[scope]), scope))
        self.log.debug(" {:4} Enum Constants".format(len(enum_consts)))
        self.log.debug(" {:4} Identifiers".format(len(identifiers)))
        self.log.debug(" {:4} Exported Symbols".format(len(symbols)))
        return {
            "public_macros": actual_macros["public"],
            "internal_macros": actual_macros["internal"],
            "private_macros": all_macros["private"],
            "enum_consts": enum_consts,
            "identifiers": identifiers,
            "excluded_identifiers": excluded_identifiers,
            "symbols": symbols,
            "mbed_psa_words": mbed_psa_words
        }

    def is_file_excluded(self, path, exclude_wildcards):
        """Whether the given file path is excluded."""
        # exclude_wildcards may be None. Also, consider the global exclusions.
        exclude_wildcards = (exclude_wildcards or []) + self.excluded_files
        for pattern in exclude_wildcards:
            if fnmatch.fnmatch(path, pattern):
                return True
        return False

    def get_all_files(self, include_wildcards, exclude_wildcards):
        """
        Get all files that match any of the included UNIX-style wildcards
        and filter them into included and excluded lists.
        While the check_names script is designed only for use on UNIX/macOS
        (due to nm), this function alone will work fine on Windows even with
        forward slashes in the wildcard.

        Args:
        * include_wildcards: a List of shell-style wildcards to match filepaths.
        * exclude_wildcards: a List of shell-style wildcards to exclude.

        Returns:
        * inc_files: A List of relative filepaths for included files.
        * exc_files: A List of relative filepaths for excluded files.
        """
        accumulator = set()
        all_wildcards = include_wildcards + (exclude_wildcards or [])
        for wildcard in all_wildcards:
            accumulator = accumulator.union(glob.iglob(wildcard))

        inc_files = []
        exc_files = []
        for path in accumulator:
            if self.is_file_excluded(path, exclude_wildcards):
                exc_files.append(path)
            else:
                inc_files.append(path)
        return (inc_files, exc_files)

    def get_included_files(self, include_wildcards, exclude_wildcards):
        """
        Get all files that match any of the included UNIX-style wildcards.
        While the check_names script is designed only for use on UNIX/macOS
        (due to nm), this function alone will work fine on Windows even with
        forward slashes in the wildcard.

        Args:
        * include_wildcards: a List of shell-style wildcards to match filepaths.
        * exclude_wildcards: a List of shell-style wildcards to exclude.

        Returns a List of relative filepaths.
        """
        accumulator = set()

        for include_wildcard in include_wildcards:
            accumulator = accumulator.union(glob.iglob(include_wildcard))

        return list(path for path in accumulator
                    if not self.is_file_excluded(path, exclude_wildcards))

    def parse_macros(self, include, exclude=None):
        """
        Parse all macros defined by #define preprocessor directives.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns a List of Match objects for the found macros.
        """
        macro_regex = re.compile(r"# *define +(?P<macro>\w+)")
        exclusions = (
            "asm", "inline", "EMIT", "_CRT_SECURE_NO_DEPRECATE", "MULADDC_"
        )

        files = self.get_included_files(include, exclude)
        self.log.debug("Looking for macros in {} files".format(len(files)))

        macros = []
        for header_file in files:
            with open(header_file, "r", encoding="utf-8") as header:
                for line_no, line in enumerate(header):
                    for macro in macro_regex.finditer(line):
                        if macro.group("macro").startswith(exclusions):
                            continue

                        macros.append(Match(
                            header_file,
                            line,
                            line_no,
                            macro.span("macro"),
                            macro.group("macro")))

        return macros

    def parse_mbed_psa_words(self, include, exclude=None):
        """
        Parse all words in the file that begin with MBED|PSA, in and out of
        macros, comments, anything.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns a List of Match objects for words beginning with MBED|PSA.
        """
        # Typos of TLS are common, hence the broader check below than MBEDTLS.
        mbed_regex = re.compile(r"\b(MBED.+?|PSA)_[A-Z0-9_]*")
        exclusions = re.compile(r"// *no-check-names|#error")

        files = self.get_included_files(include, exclude)
        self.log.debug(
            "Looking for MBED|PSA words in {} files"
            .format(len(files))
        )

        mbed_psa_words = []
        for filename in files:
            with open(filename, "r", encoding="utf-8") as fp:
                for line_no, line in enumerate(fp):
                    if exclusions.search(line):
                        continue

                    for name in mbed_regex.finditer(line):
                        mbed_psa_words.append(Match(
                            filename,
                            line,
                            line_no,
                            name.span(0),
                            name.group(0)))

        return mbed_psa_words

    def parse_enum_consts(self, include, exclude=None):
        """
        Parse all enum value constants that are declared.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns a List of Match objects for the findings.
        """
        files = self.get_included_files(include, exclude)
        self.log.debug("Looking for enum consts in {} files".format(len(files)))

        # Emulate a finite state machine to parse enum declarations.
        # OUTSIDE_KEYWORD = outside the enum keyword
        # IN_BRACES = inside enum opening braces
        # IN_BETWEEN = between enum keyword and opening braces
        states = enum.Enum("FSM", ["OUTSIDE_KEYWORD", "IN_BRACES", "IN_BETWEEN"])
        enum_consts = []
        for header_file in files:
            state = states.OUTSIDE_KEYWORD
            with open(header_file, "r", encoding="utf-8") as header:
                for line_no, line in enumerate(header):
                    # Match typedefs and brackets only when they are at the
                    # beginning of the line -- if they are indented, they might
                    # be sub-structures within structs, etc.
                    optional_c_identifier = r"([_a-zA-Z][_a-zA-Z0-9]*)?"
                    if (state == states.OUTSIDE_KEYWORD and
                            re.search(r"^(typedef +)?enum " + \
                                      optional_c_identifier + \
                                      r" *{", line)):
                        state = states.IN_BRACES
                    elif (state == states.OUTSIDE_KEYWORD and
                          re.search(r"^(typedef +)?enum", line)):
                        state = states.IN_BETWEEN
                    elif (state == states.IN_BETWEEN and
                          re.search(r"^{", line)):
                        state = states.IN_BRACES
                    elif (state == states.IN_BRACES and
                          re.search(r"^}", line)):
                        state = states.OUTSIDE_KEYWORD
                    elif (state == states.IN_BRACES and
                          not re.search(r"^ *#", line)):
                        enum_const = re.search(r"^ *(?P<enum_const>\w+)", line)
                        if not enum_const:
                            continue

                        enum_consts.append(Match(
                            header_file,
                            line,
                            line_no,
                            enum_const.span("enum_const"),
                            enum_const.group("enum_const")))

        return enum_consts
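
    # For illustration (hypothetical input, not from the repository): given a
    # header containing
    #     typedef enum {
    #         MBEDTLS_MODE_NONE = 0,
    #         MBEDTLS_MODE_ECB,
    #     } mbedtls_cipher_mode_t;
    # the state machine enters IN_BRACES on the first line, records
    # MBEDTLS_MODE_NONE and MBEDTLS_MODE_ECB as enum constant Matches, and
    # returns to OUTSIDE_KEYWORD on the closing brace.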

    IGNORED_CHUNK_REGEX = re.compile('|'.join([
        r'/\*.*?\*/', # block comment entirely on one line
        r'//.*', # line comment
        r'(?P<string>")(?:[^\\\"]|\\.)*"', # string literal
    ]))

    def strip_comments_and_literals(self, line, in_block_comment):
        """Strip comments and string literals from line.

        Continuation lines are not supported.

        If in_block_comment is true, assume that the line starts inside a
        block comment.

        Return updated values of (line, in_block_comment) where:
        * Comments in line have been replaced by a space (or nothing at the
          start or end of the line).
        * String contents have been removed.
        * in_block_comment indicates whether the line ends inside a block
          comment that continues on the next line.
        """

        # Terminate current multiline comment?
        if in_block_comment:
            m = re.search(r"\*/", line)
            if m:
                in_block_comment = False
                line = line[m.end(0):]
            else:
                return '', True

        # Remove full comments and string literals.
        # Do it all together to handle cases like "/*" correctly.
        # Note that continuation lines are not supported.
        line = re.sub(self.IGNORED_CHUNK_REGEX,
                      lambda s: '""' if s.group('string') else ' ',
                      line)

        # Start an unfinished comment?
        # (If `/*` was part of a complete comment, it's already been removed.)
        m = re.search(r"/\*", line)
        if m:
            in_block_comment = True
            line = line[:m.start(0)]

        return line, in_block_comment
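
    # For illustration (hypothetical single-line inputs):
    #   strip_comments_and_literals('int x; /* note', False)
    # returns ('int x; ', True): the unterminated comment is dropped and the
    # caller is told that the next line starts inside a block comment, while
    #   strip_comments_and_literals('end */ char *s = "a/*b";', True)
    # returns (' char *s = "";', False) with the string contents removed.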

    IDENTIFIER_REGEX = re.compile('|'.join([
        # Match " something(a" or " *something(a". Functions.
        # Assumptions:
        # - function definition from return type to one of its arguments is
        #   all on one line
        # - function definition line only contains alphanumeric, asterisk,
        #   underscore, and open bracket
        r".* \**(\w+) *\( *\w",
        # Match "(*something)(".
        r".*\( *\* *(\w+) *\) *\(",
        # Match names of named data structures.
        r"(?:typedef +)?(?:struct|union|enum) +(\w+)(?: *{)?$",
        # Match names of typedef instances, after closing bracket.
        r"}? *(\w+)[;[].*",
    ]))
    # The regex below is indented for clarity.
    EXCLUSION_LINES = re.compile("|".join([
        r"extern +\"C\"",
        r"(typedef +)?(struct|union|enum)( *{)?$",
        r"} *;?$",
        r"$",
        r"//",
        r"#",
    ]))

    def parse_identifiers_in_file(self, header_file, identifiers):
        """
        Parse all lines of a header where a function/enum/struct/union/typedef
        identifier is declared, based on some regex and heuristics. Highly
        dependent on formatting style.

        Append found matches to the list ``identifiers``.
        """

        with open(header_file, "r", encoding="utf-8") as header:
            in_block_comment = False
            # The previous line variable is used for concatenating lines
            # when identifiers are formatted and spread across multiple
            # lines.
            previous_line = ""

            for line_no, line in enumerate(header):
                line, in_block_comment = \
                    self.strip_comments_and_literals(line, in_block_comment)

                if self.EXCLUSION_LINES.match(line):
                    previous_line = ""
                    continue

                # If the line contains only space-separated alphanumeric
                # characters (or underscore, asterisk, or open parenthesis),
                # and nothing else, high chance it's a declaration that
                # continues on the next line
                if re.search(r"^([\w\*\(]+\s+)+$", line):
                    previous_line += line
                    continue

                # If previous line seemed to start an unfinished declaration
                # (as above), concat and treat them as one.
                if previous_line:
                    line = previous_line.strip() + " " + line.strip() + "\n"
                    previous_line = ""

                # Skip parsing if line has a space in front = heuristic to
                # skip function argument lines (highly subject to formatting
                # changes)
                if line[0] == " ":
                    continue

                identifier = self.IDENTIFIER_REGEX.search(line)

                if not identifier:
                    continue

                # Find the group that matched, and append it
                for group in identifier.groups():
                    if not group:
                        continue

                    identifiers.append(Match(
                        header_file,
                        line,
                        line_no,
                        identifier.span(),
                        group))

    def parse_identifiers(self, include, exclude=None):
        """
        Parse all lines of a header where a function/enum/struct/union/typedef
        identifier is declared, based on some regex and heuristics. Highly
        dependent on formatting style. Identifiers in excluded files are still
        parsed.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns: a Tuple of two Lists of Match objects with identifiers.
        * included_identifiers: A List of Match objects with identifiers from
          included files.
        * excluded_identifiers: A List of Match objects with identifiers from
          excluded files.
        """

        included_files, excluded_files = \
            self.get_all_files(include, exclude)

        self.log.debug("Looking for included identifiers in {} files".format \
                       (len(included_files)))

        included_identifiers = []
        excluded_identifiers = []
        for header_file in included_files:
            self.parse_identifiers_in_file(header_file, included_identifiers)
        for header_file in excluded_files:
            self.parse_identifiers_in_file(header_file, excluded_identifiers)

        return (included_identifiers, excluded_identifiers)

    def parse_symbols(self):
        """
        Compile the Mbed TLS libraries, and parse the TLS, Crypto, and x509
        object files using nm to retrieve the list of referenced symbols.
        Exceptions thrown here are rethrown because they would be critical
        errors that void several tests, and thus need to halt the program. This
        is explicitly done for clarity.

        Returns a List of unique symbols defined and used in the libraries.
        """
        self.log.info("Compiling...")
        symbols = []

        # Back up the config and atomically compile with the full configuration.
        shutil.copy(
            "include/mbedtls/mbedtls_config.h",
            "include/mbedtls/mbedtls_config.h.bak"
        )
        try:
            # Use check=True in all subprocess calls so that failures are raised
            # as exceptions and logged.
            subprocess.run(
                ["python3", "scripts/config.py", "full"],
                universal_newlines=True,
                check=True
            )
            my_environment = os.environ.copy()
            my_environment["CFLAGS"] = "-fno-asynchronous-unwind-tables"
            # Run make clean separately to lib to prevent unwanted behavior when
            # make is invoked with parallelism.
            subprocess.run(
                ["make", "clean"],
                universal_newlines=True,
                check=True
            )
            subprocess.run(
                ["make", "lib"],
                env=my_environment,
                universal_newlines=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True
            )

            # Perform object file analysis using nm
            symbols = self.parse_symbols_from_nm([
                "library/libmbedcrypto.a",
                "library/libmbedtls.a",
                "library/libmbedx509.a"
            ])

            subprocess.run(
                ["make", "clean"],
                universal_newlines=True,
                check=True
            )
        except subprocess.CalledProcessError as error:
            self.log.debug(error.output)
            raise error
        finally:
            # Put back the original config regardless of there being errors.
            # Works also for keyboard interrupts.
            shutil.move(
                "include/mbedtls/mbedtls_config.h.bak",
                "include/mbedtls/mbedtls_config.h"
            )

        return symbols

    def parse_symbols_from_nm(self, object_files):
        """
        Run nm to retrieve the list of referenced symbols in each object file.
        Does not return the position data since it is of no use.

        Args:
        * object_files: a List of compiled object filepaths to search through.

        Returns a List of unique symbols defined and used in any of the object
        files.
        """
        nm_undefined_regex = re.compile(r"^\S+: +U |^$|^\S+:$")
        nm_valid_regex = re.compile(r"^\S+( [0-9A-Fa-f]+)* . _*(?P<symbol>\w+)")
        exclusions = ("FStar", "Hacl")

        symbols = []

        # Gather all outputs of nm
        nm_output = ""
        for lib in object_files:
            nm_output += subprocess.run(
                ["nm", "-og", lib],
                universal_newlines=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True
            ).stdout

        for line in nm_output.splitlines():
            if not nm_undefined_regex.search(line):
                symbol = nm_valid_regex.search(line)
                if (symbol and not symbol.group("symbol").startswith(exclusions)):
                    symbols.append(symbol.group("symbol"))
                else:
                    self.log.error(line)

        return symbols
|
||||
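# A minimal sketch (not part of the checks themselves) of how the two regexes
# in parse_symbols_from_nm classify "nm -og" output. The sample lines are
# invented; real nm output varies by platform.
#
#     undefined = re.compile(r"^\S+: +U |^$|^\S+:$")
#     valid = re.compile(r"^\S+( [0-9A-Fa-f]+)* . _*(?P<symbol>\w+)")
#     for line in ["libmbedcrypto.a:aes.o:                 U memset",
#                  "libmbedcrypto.a:aes.o:0000000000000000 T mbedtls_aes_init"]:
#         if not undefined.search(line):
#             print(valid.search(line).group("symbol"))
#     # prints only "mbedtls_aes_init"; the undefined memset reference is skipped
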
class NameChecker():
    """
    Representation of the core name checking operation performed by this script.
    """
    def __init__(self, parse_result, log):
        self.parse_result = parse_result
        self.log = log

    def perform_checks(self, quiet=False):
        """
        A comprehensive checker that performs each check in order, and outputs
        a final verdict.

        Args:
        * quiet: whether to hide detailed problem explanations.
        """
        self.log.info("=============")
        Problem.quiet = quiet
        problems = 0
        problems += self.check_symbols_declared_in_header()

        pattern_checks = [
            ("public_macros", PUBLIC_MACRO_PATTERN),
            ("internal_macros", INTERNAL_MACRO_PATTERN),
            ("enum_consts", CONSTANTS_PATTERN),
            ("identifiers", IDENTIFIER_PATTERN)
        ]
        for group, check_pattern in pattern_checks:
            problems += self.check_match_pattern(group, check_pattern)

        problems += self.check_for_typos()

        self.log.info("=============")
        if problems > 0:
            self.log.info("FAIL: {0} problem(s) to fix".format(str(problems)))
            if quiet:
                self.log.info("Remove --quiet to see explanations.")
            else:
                self.log.info("Use --quiet for minimal output.")
            return 1
        else:
            self.log.info("PASS")
            return 0

    def check_symbols_declared_in_header(self):
        """
        Perform a check that all detected symbols in the library object files
        are properly declared in headers.
        Assumes parse_names_in_source() was called before this.

        Returns the number of problems that need fixing.
        """
        problems = []
        all_identifiers = self.parse_result["identifiers"] + \
            self.parse_result["excluded_identifiers"]

        for symbol in self.parse_result["symbols"]:
            found_symbol_declared = False
            for identifier_match in all_identifiers:
                if symbol == identifier_match.name:
                    found_symbol_declared = True
                    break

            if not found_symbol_declared:
                problems.append(SymbolNotInHeader(symbol))

        self.output_check_result("All symbols in header", problems)
        return len(problems)

    def check_match_pattern(self, group_to_check, check_pattern):
        """
        Perform a check that all items of a group conform to a regex pattern.
        Assumes parse_names_in_source() was called before this.

        Args:
        * group_to_check: string key to index into self.parse_result.
        * check_pattern: the regex to check against.

        Returns the number of problems that need fixing.
        """
        problems = []

        for item_match in self.parse_result[group_to_check]:
            if not re.search(check_pattern, item_match.name):
                problems.append(PatternMismatch(check_pattern, item_match))
            # Double underscores should not be used in names.
            if re.search(r".*__.*", item_match.name):
                problems.append(
                    PatternMismatch("no double underscore allowed", item_match))

        self.output_check_result(
            "Naming patterns of {}".format(group_to_check),
            problems)
        return len(problems)

    def check_for_typos(self):
        """
        Perform a check that all words in the source code beginning with MBED
        are either defined as macros or as enum constants.
        Assumes parse_names_in_source() was called before this.

        Returns the number of problems that need fixing.
        """
        problems = []

        # Set comprehension, equivalent to a list comprehension wrapped by set()
        all_caps_names = {
            match.name
            for match
            in self.parse_result["public_macros"] +
            self.parse_result["internal_macros"] +
            self.parse_result["private_macros"] +
            self.parse_result["enum_consts"]
        }
        typo_exclusion = re.compile(r"XXX|__|_$|^MBEDTLS_.*CONFIG_FILE$|"
                                    r"MBEDTLS_TEST_LIBTESTDRIVER*|"
                                    r"PSA_CRYPTO_DRIVER_TEST")

        for name_match in self.parse_result["mbed_psa_words"]:
            found = name_match.name in all_caps_names

            # Since MBEDTLS_PSA_ACCEL_XXX defines are defined by the
            # PSA driver, they will not exist as macros. However, they
            # should still be checked for typos using the equivalent
            # BUILTINs that exist.
            if "MBEDTLS_PSA_ACCEL_" in name_match.name:
                found = name_match.name.replace(
                    "MBEDTLS_PSA_ACCEL_",
                    "MBEDTLS_PSA_BUILTIN_") in all_caps_names

            if not found and not typo_exclusion.search(name_match.name):
                problems.append(Typo(name_match))

        self.output_check_result("Likely typos", problems)
        return len(problems)

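    # An illustrative sketch (the names are chosen for the example): if the
    # parsed sources mention MBEDTLS_PSA_ACCEL_ALG_GCM, the check above does
    # not require that exact macro to exist; the name passes as long as the
    # corresponding builtin is among the collected macro names:
    #
    #     name = "MBEDTLS_PSA_ACCEL_ALG_GCM"
    #     found = name.replace("MBEDTLS_PSA_ACCEL_",
    #                          "MBEDTLS_PSA_BUILTIN_") in all_caps_names
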
    def output_check_result(self, name, problems):
        """
        Write out the PASS/FAIL status of a performed check depending on whether
        there were problems.

        Args:
        * name: the name of the test
        * problems: a List of encountered Problems
        """
        if problems:
            self.log.info("{}: FAIL\n".format(name))
            for problem in problems:
                self.log.warning(str(problem))
        else:
            self.log.info("{}: PASS".format(name))


def main():
    """
    Perform argument parsing, and create an instance of CodeParser and
    NameChecker to begin the core operation.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=(
            "This script confirms that the naming of all symbols and identifiers "
            "in Mbed TLS is consistent with the house style and is also "
            "self-consistent.\n\n"
            "Expected to be run from the Mbed TLS root directory.")
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="show parse results"
    )
    parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="hide unnecessary text, explanations, and highlights"
    )

    args = parser.parse_args()

    # Configure the global logger, which is then passed to the classes below
    log = logging.getLogger()
    log.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    log.addHandler(logging.StreamHandler())

    try:
        code_parser = CodeParser(log)
        parse_result = code_parser.comprehensive_parse()
    except Exception: # pylint: disable=broad-except
        traceback.print_exc()
        sys.exit(2)

    name_checker = NameChecker(parse_result, log)
    return_code = name_checker.perform_checks(quiet=args.quiet)

    sys.exit(return_code)


if __name__ == "__main__":
    main()
212
externals/mbedtls/tests/scripts/check_test_cases.py
vendored
Executable file
@@ -0,0 +1,212 @@
#!/usr/bin/env python3

"""Sanity checks for test data.

This program contains a class for traversing test cases that can be used
independently of the checks.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import argparse
import glob
import os
import re
import subprocess
import sys


class Results:
    """Store file and line information about errors or warnings in test suites."""

    def __init__(self, options):
        self.errors = 0
        self.warnings = 0
        self.ignore_warnings = options.quiet

    def error(self, file_name, line_number, fmt, *args):
        sys.stderr.write(('{}:{}:ERROR:' + fmt + '\n').
                         format(file_name, line_number, *args))
        self.errors += 1

    def warning(self, file_name, line_number, fmt, *args):
        if not self.ignore_warnings:
            sys.stderr.write(('{}:{}:Warning:' + fmt + '\n')
                             .format(file_name, line_number, *args))
            self.warnings += 1

class TestDescriptionExplorer:
    """An iterator over test cases with descriptions.

    The test cases that have descriptions are:
    * Individual unit tests (entries in a .data file) in test suites.
    * Individual test cases in ssl-opt.sh.

    This is an abstract class. To use it, derive a class that implements
    the process_test_case method, and call walk_all().
    """

    def process_test_case(self, per_file_state,
                          file_name, line_number, description):
        """Process a test case.

        per_file_state: an object created by new_per_file_state() at the beginning
                        of each file.
        file_name: a relative path to the file containing the test case.
        line_number: the line number in the given file.
        description: the test case description as a byte string.
        """
        raise NotImplementedError

    def new_per_file_state(self):
        """Return a new per-file state object.

        The default per-file state object is None. Child classes that require
        per-file state may override this method.
        """
        #pylint: disable=no-self-use
        return None

    def walk_test_suite(self, data_file_name):
        """Iterate over the test cases in the given unit test data file."""
        in_paragraph = False
        descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
        with open(data_file_name, 'rb') as data_file:
            for line_number, line in enumerate(data_file, 1):
                line = line.rstrip(b'\r\n')
                if not line:
                    in_paragraph = False
                    continue
                if line.startswith(b'#'):
                    continue
                if not in_paragraph:
                    # This is a test case description line.
                    self.process_test_case(descriptions,
                                           data_file_name, line_number, line)
                in_paragraph = True

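    # For reference, a .data unit test file is a sequence of paragraphs
    # separated by blank lines; the first line of each paragraph is the test
    # case description. A made-up excerpt:
    #
    #     # Comment lines like this one are ignored.
    #     Base64 encode: empty string
    #     base64_encode:"":"":0
    #
    #     Base64 encode: one byte
    #     base64_encode:"00":"AA==":0
    #
    # walk_test_suite() above calls process_test_case() once per paragraph,
    # passing the description line as a byte string.
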
    def collect_from_script(self, file_name):
        """Collect the test cases in a script by calling its test-case
        listing option."""
        descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
        listed = subprocess.check_output(['sh', file_name, '--list-test-cases'])
        # Assume the test script prints test case descriptions in the same
        # format for --list-test-cases as in its OUTCOME.CSV.
        #
        # idx is the index of the test case, since the script does not
        # expose a line number for each test case.
        for idx, description in enumerate(listed.splitlines()):
            self.process_test_case(descriptions,
                                   file_name,
                                   idx,
                                   description.rstrip())

    @staticmethod
    def collect_test_directories():
        """Get the relative path for the TLS and Crypto test directories."""
        if os.path.isdir('tests'):
            tests_dir = 'tests'
        elif os.path.isdir('suites'):
            tests_dir = '.'
        elif os.path.isdir('../suites'):
            tests_dir = '..'
        directories = [tests_dir]
        return directories

    def walk_all(self):
        """Iterate over all named test cases."""
        test_directories = self.collect_test_directories()
        for directory in test_directories:
            for data_file_name in glob.glob(os.path.join(directory, 'suites',
                                                         '*.data')):
                self.walk_test_suite(data_file_name)

            for sh_file in ['ssl-opt.sh', 'compat.sh']:
                sh_file = os.path.join(directory, sh_file)
                if os.path.exists(sh_file):
                    self.collect_from_script(sh_file)

class TestDescriptions(TestDescriptionExplorer):
    """Collect the available test cases."""

    def __init__(self):
        super().__init__()
        self.descriptions = set()

    def process_test_case(self, _per_file_state,
                          file_name, _line_number, description):
        """Record an available test case."""
        base_name = re.sub(r'\.[^.]*$', '', re.sub(r'.*/', '', file_name))
        key = ';'.join([base_name, description.decode('utf-8')])
        self.descriptions.add(key)

def collect_available_test_cases():
    """Collect the available test cases."""
    explorer = TestDescriptions()
    explorer.walk_all()
    return sorted(explorer.descriptions)

class DescriptionChecker(TestDescriptionExplorer):
    """Check all test case descriptions.

    * Check that each description is valid (length, allowed character set, etc.).
    * Check that there is no duplicated description inside of one test suite.
    """

    def __init__(self, results):
        self.results = results

    def new_per_file_state(self):
        """Dictionary mapping descriptions to their line number."""
        return {}

    def process_test_case(self, per_file_state,
                          file_name, line_number, description):
        """Check test case descriptions for errors."""
        results = self.results
        seen = per_file_state
        if description in seen:
            results.error(file_name, line_number,
                          'Duplicate description (also line {})',
                          seen[description])
            return
        if re.search(br'[\t;]', description):
            results.error(file_name, line_number,
                          'Forbidden character \'{}\' in description',
                          re.search(br'[\t;]', description).group(0).decode('ascii'))
        if re.search(br'[^ -~]', description):
            results.error(file_name, line_number,
                          'Non-ASCII character in description')
        if len(description) > 66:
            results.warning(file_name, line_number,
                            'Test description too long ({} > 66)',
                            len(description))
        seen[description] = line_number

def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--list-all',
                        action='store_true',
                        help='List all test cases, without doing checks')
    parser.add_argument('--quiet', '-q',
                        action='store_true',
                        help='Hide warnings')
    parser.add_argument('--verbose', '-v',
                        action='store_false', dest='quiet',
                        help='Show warnings (default: on; undoes --quiet)')
    options = parser.parse_args()
    if options.list_all:
        descriptions = collect_available_test_cases()
        sys.stdout.write('\n'.join(descriptions + ['']))
        return
    results = Results(options)
    checker = DescriptionChecker(results)
    checker.walk_all()
    if (results.warnings or results.errors) and not options.quiet:
        sys.stderr.write('{}: {} errors, {} warnings\n'
                         .format(sys.argv[0], results.errors, results.warnings))
    sys.exit(1 if results.errors else 0)

if __name__ == '__main__':
    main()
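
A short usage sketch, not part of the script itself: since the traversal
classes can be reused independently of the checks, the available test cases
can for instance be counted per suite (this assumes the module is importable
as check_test_cases and the current directory is one that
collect_test_directories() recognizes):

    import collections
    import check_test_cases

    counts = collections.Counter(
        key.split(';', 1)[0]
        for key in check_test_cases.collect_available_test_cases())
    for suite, count in counts.most_common(5):
        print(suite, count)
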
557
externals/mbedtls/tests/scripts/depends.py
vendored
Executable file
@@ -0,0 +1,557 @@
#!/usr/bin/env python3

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""
Test Mbed TLS with a subset of algorithms.

This script can be divided into several steps:

First, include/mbedtls/mbedtls_config.h or a different config file passed
in the arguments is parsed to extract any configuration options (using config.py).

Then, test domains (groups of jobs, tests) are built based on predefined data
collected in the DomainData class. Here, each domain has five major traits:
- domain name, which can be used to run only specific tests via the command line;
- configuration building method, described in detail below;
- list of symbols passed to the configuration building method;
- commands to be run on each job (only build, build and test, or any other custom);
- optional list of symbols to be excluded from testing.

The configuration building method can be one of the following three:

- ComplementaryDomain - build a job for each passed symbol by disabling a single
  symbol and its reverse dependencies (defined in REVERSE_DEPENDENCIES);

- ExclusiveDomain - build a job where, for each passed symbol, only this particular
  one is defined and other symbols from the list are unset. For each job look for
  any non-standard symbols to set/unset in EXCLUSIVE_GROUPS. These are usually not
  direct dependencies, but rather non-trivial results of other configs missing. Then
  look for any unset symbols and handle their reverse dependencies.
  An example of EXCLUSIVE_GROUPS usage:
  - The MBEDTLS_SHA512_C job turns off all hashes except SHA512. MBEDTLS_SSL_COOKIE_C
    requires either SHA256 or SHA384 to work, so it also has to be disabled.
    This is not a dependency on SHA512_C, but a result of the exclusive domain
    config building method. Relevant field:
    'MBEDTLS_SHA512_C': ['-MBEDTLS_SSL_COOKIE_C'],

- DualDomain - a combination of the two above: both the complementary and the
  exclusive domain job generation code will be run. Currently only used for hashes.

Lastly, the collected jobs are executed and (optionally) tested, with
error reporting and coloring as configured in options. Each test starts with
a full config minus a few slow or unnecessary options
(see set_reference_config), then the specific job config is derived.
"""
import argparse
import os
import re
import shutil
import subprocess
import sys
import traceback
from typing import Union

# Add the Mbed TLS Python library directory to the module search path
import scripts_path # pylint: disable=unused-import
import config

class Colors: # pylint: disable=too-few-public-methods
    """Minimalistic support for colored output.
    Each field of an object of this class is either None if colored output
    is not possible or not desired, or a pair of strings (start, stop) such
    that outputting start switches the text color to the desired color and
    stop switches the text color back to the default."""
    red = None
    green = None
    cyan = None
    bold_red = None
    bold_green = None
    def __init__(self, options=None):
        """Initialize color profile according to passed options."""
        if not options or options.color in ['no', 'never']:
            want_color = False
        elif options.color in ['yes', 'always']:
            want_color = True
        else:
            want_color = sys.stderr.isatty()
        if want_color:
            # Assume ANSI compatible terminal
            normal = '\033[0m'
            self.red = ('\033[31m', normal)
            self.green = ('\033[32m', normal)
            self.cyan = ('\033[36m', normal)
            self.bold_red = ('\033[1;31m', normal)
            self.bold_green = ('\033[1;32m', normal)
NO_COLORS = Colors(None)

def log_line(text, prefix='depends.py:', suffix='', color=None):
    """Print a status message."""
    if color is not None:
        prefix = color[0] + prefix
        suffix = suffix + color[1]
    sys.stderr.write(prefix + ' ' + text + suffix + '\n')
    sys.stderr.flush()

def log_command(cmd):
    """Print a trace of the specified command.
    cmd is a list of strings: a command name and its arguments."""
    log_line(' '.join(cmd), prefix='+')

def backup_config(options):
    """Back up the library configuration file (mbedtls_config.h).
    If the backup file already exists, it is presumed to be the desired backup,
    so don't make another backup."""
    if os.path.exists(options.config_backup):
        options.own_backup = False
    else:
        options.own_backup = True
        shutil.copy(options.config, options.config_backup)

def restore_config(options):
    """Restore the library configuration file (mbedtls_config.h).
    Remove the backup file if it was saved earlier."""
    if options.own_backup:
        shutil.move(options.config_backup, options.config)
    else:
        shutil.copy(options.config_backup, options.config)

def option_exists(conf, option):
    return option in conf.settings

def set_config_option_value(conf, option, colors, value: Union[bool, str]):
    """Set/unset a configuration option, optionally specifying a value.
    value can be either True/False (set/unset the config option), or a string,
    which will make the symbol defined with that value."""
    if not option_exists(conf, option):
        log_line('Symbol {} was not found in {}'.format(option, conf.filename), color=colors.red)
        return False

    if value is False:
        log_command(['config.py', 'unset', option])
        conf.unset(option)
    elif value is True:
        log_command(['config.py', 'set', option])
        conf.set(option)
    else:
        log_command(['config.py', 'set', option, value])
        conf.set(option, value)
    return True

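# For example, hypothetical calls and the trace each one logs:
#
#     set_config_option_value(conf, 'MBEDTLS_CMAC_C', colors, True)
#     # + config.py set MBEDTLS_CMAC_C
#     set_config_option_value(conf, 'MBEDTLS_CMAC_C', colors, False)
#     # + config.py unset MBEDTLS_CMAC_C
#     set_config_option_value(conf, 'MBEDTLS_MPI_MAX_SIZE', colors, '512')
#     # + config.py set MBEDTLS_MPI_MAX_SIZE 512
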
def set_reference_config(conf, options, colors):
    """Change the library configuration file (mbedtls_config.h) to the reference state.
    The reference state is the one from which the tested configurations are
    derived."""
    # Turn off options that are not relevant to the tests and slow them down.
    log_command(['config.py', 'full'])
    conf.adapt(config.full_adapter)
    set_config_option_value(conf, 'MBEDTLS_TEST_HOOKS', colors, False)
    set_config_option_value(conf, 'MBEDTLS_PSA_CRYPTO_CONFIG', colors, False)
    if options.unset_use_psa:
        set_config_option_value(conf, 'MBEDTLS_USE_PSA_CRYPTO', colors, False)

class Job:
    """A job builds the library in a specific configuration and runs some tests."""
    def __init__(self, name, config_settings, commands):
        """Build a job object.
        The job uses the configuration described by config_settings. This is a
        dictionary where the keys are preprocessor symbols and the values are
        booleans or strings. A boolean indicates whether or not to #define the
        symbol. With a string, the symbol is #define'd to that value.
        After setting the configuration, the job runs the programs specified by
        commands. This is a list of lists of strings; each list of strings is a
        command name and its arguments and is passed to subprocess.call with
        shell=False."""
        self.name = name
        self.config_settings = config_settings
        self.commands = commands

    def announce(self, colors, what):
        '''Announce the start or completion of a job.
        If what is None, announce the start of the job.
        If what is True, announce that the job has passed.
        If what is False, announce that the job has failed.'''
        if what is True:
            log_line(self.name + ' PASSED', color=colors.green)
        elif what is False:
            log_line(self.name + ' FAILED', color=colors.red)
        else:
            log_line('starting ' + self.name, color=colors.cyan)

    def configure(self, conf, options, colors):
        '''Set library configuration options as required for the job.'''
        set_reference_config(conf, options, colors)
        for key, value in sorted(self.config_settings.items()):
            ret = set_config_option_value(conf, key, colors, value)
            if ret is False:
                return False
        return True

    def test(self, options):
        '''Run the job's build and test commands.
        Return True if all the commands succeed and False otherwise.
        If options.keep_going is false, stop as soon as one command fails. Otherwise
        run all the commands, except that if the first command fails, none of the
        other commands are run (typically, the first command is a build command
        and subsequent commands are tests that cannot run if the build failed).'''
        built = False
        success = True
        for command in self.commands:
            log_command(command)
            env = os.environ.copy()
            if 'MBEDTLS_TEST_CONFIGURATION' in env:
                env['MBEDTLS_TEST_CONFIGURATION'] += '-' + self.name
            ret = subprocess.call(command, env=env)
            if ret != 0:
                if command[0] not in ['make', options.make_command]:
                    log_line('*** [{}] Error {}'.format(' '.join(command), ret))
                if not options.keep_going or not built:
                    return False
                success = False
            built = True
        return success

# If the configuration option A requires B, make sure that
# B is in REVERSE_DEPENDENCIES[A].
# All the information here should be contained in check_config.h. This
# file includes a copy because it changes rarely and it would be a pain
# to extract automatically.
REVERSE_DEPENDENCIES = {
    'MBEDTLS_AES_C': ['MBEDTLS_CTR_DRBG_C',
                      'MBEDTLS_NIST_KW_C'],
    'MBEDTLS_CHACHA20_C': ['MBEDTLS_CHACHAPOLY_C'],
    'MBEDTLS_ECDSA_C': ['MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
                        'MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED'],
    'MBEDTLS_ECP_C': ['MBEDTLS_ECDSA_C',
                      'MBEDTLS_ECDH_C',
                      'MBEDTLS_ECJPAKE_C',
                      'MBEDTLS_ECP_RESTARTABLE',
                      'MBEDTLS_PK_PARSE_EC_EXTENDED',
                      'MBEDTLS_PK_PARSE_EC_COMPRESSED',
                      'MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED',
                      'MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED',
                      'MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL_ENABLED'],
    'MBEDTLS_ECP_DP_SECP256R1_ENABLED': ['MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED'],
    'MBEDTLS_PKCS1_V21': ['MBEDTLS_X509_RSASSA_PSS_SUPPORT'],
    'MBEDTLS_PKCS1_V15': ['MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED',
                          'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
                          'MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED',
                          'MBEDTLS_KEY_EXCHANGE_RSA_ENABLED'],
    'MBEDTLS_RSA_C': ['MBEDTLS_X509_RSASSA_PSS_SUPPORT',
                      'MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_RSA_ENABLED',
                      'MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED'],
    'MBEDTLS_SHA256_C': ['MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED',
                         'MBEDTLS_ENTROPY_FORCE_SHA256',
                         'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT',
                         'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY',
                         'MBEDTLS_LMS_C',
                         'MBEDTLS_LMS_PRIVATE'],
    'MBEDTLS_SHA512_C': ['MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT',
                         'MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY'],
    'MBEDTLS_SHA224_C': ['MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED',
                         'MBEDTLS_ENTROPY_FORCE_SHA256',
                         'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT',
                         'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY'],
    'MBEDTLS_X509_RSASSA_PSS_SUPPORT': []
}

# If an option is tested in an exclusive test, alter the following defines.
# These are not necessarily dependencies, but just minimal required changes
# if a given define is the only one enabled from an exclusive group.
EXCLUSIVE_GROUPS = {
    'MBEDTLS_SHA512_C': ['-MBEDTLS_SSL_COOKIE_C',
                         '-MBEDTLS_SSL_TLS_C'],
    'MBEDTLS_ECP_DP_CURVE448_ENABLED': ['-MBEDTLS_ECDSA_C',
                                        '-MBEDTLS_ECDSA_DETERMINISTIC',
                                        '-MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
                                        '-MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED',
                                        '-MBEDTLS_ECJPAKE_C',
                                        '-MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED'],
    'MBEDTLS_ECP_DP_CURVE25519_ENABLED': ['-MBEDTLS_ECDSA_C',
                                          '-MBEDTLS_ECDSA_DETERMINISTIC',
                                          '-MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
                                          '-MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED',
                                          '-MBEDTLS_ECJPAKE_C',
                                          '-MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED'],
    'MBEDTLS_ARIA_C': ['-MBEDTLS_CMAC_C'],
    'MBEDTLS_CAMELLIA_C': ['-MBEDTLS_CMAC_C'],
    'MBEDTLS_CHACHA20_C': ['-MBEDTLS_CMAC_C', '-MBEDTLS_CCM_C', '-MBEDTLS_GCM_C'],
    'MBEDTLS_DES_C': ['-MBEDTLS_CCM_C',
                      '-MBEDTLS_GCM_C',
                      '-MBEDTLS_SSL_TICKET_C',
                      '-MBEDTLS_SSL_CONTEXT_SERIALIZATION'],
}
def handle_exclusive_groups(config_settings, symbol):
    """For every symbol tested in an exclusive group, check if there are other
    defines to be altered."""
    for dep in EXCLUSIVE_GROUPS.get(symbol, []):
        unset = dep.startswith('-')
        dep = dep[1:]
        config_settings[dep] = not unset

def turn_off_dependencies(config_settings):
    """For every option turned off in config_settings, also turn off what depends on it.
    An option O is turned off if config_settings[O] is False."""
    for key, value in sorted(config_settings.items()):
        if value is not False:
            continue
        for dep in REVERSE_DEPENDENCIES.get(key, []):
            config_settings[dep] = False

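# A worked example using entries from the tables above: the exclusive
# cipher_id job for MBEDTLS_CHACHA20_C starts from a dictionary that sets
# MBEDTLS_CHACHA20_C to True and every other cipher symbol to False. Then:
#
#     handle_exclusive_groups(config_settings, 'MBEDTLS_CHACHA20_C')
#     # adds MBEDTLS_CMAC_C, MBEDTLS_CCM_C and MBEDTLS_GCM_C as False,
#     # per EXCLUSIVE_GROUPS
#     turn_off_dependencies(config_settings)
#     # every False symbol drags its REVERSE_DEPENDENCIES along, e.g. the
#     # False MBEDTLS_AES_C also turns off MBEDTLS_CTR_DRBG_C and
#     # MBEDTLS_NIST_KW_C
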
class BaseDomain: # pylint: disable=too-few-public-methods, unused-argument
    """A base class for all domains."""
    def __init__(self, symbols, commands, exclude):
        """Initialize the jobs container."""
        self.jobs = []

class ExclusiveDomain(BaseDomain): # pylint: disable=too-few-public-methods
    """A domain consisting of a set of conceptually-equivalent settings.
    Establish a list of configuration symbols. For each symbol, run a test job
    with this symbol set and the others unset."""
    def __init__(self, symbols, commands, exclude=None):
        """Build a domain for the specified list of configuration symbols.
        The domain contains a set of jobs that enable one of the elements
        of symbols and disable the others.
        Each job runs the specified commands.
        If exclude is a regular expression, skip generated jobs whose description
        would match this regular expression."""
        super().__init__(symbols, commands, exclude)
        base_config_settings = {}
        for symbol in symbols:
            base_config_settings[symbol] = False
        for symbol in symbols:
            description = symbol
            if exclude and re.match(exclude, description):
                continue
            config_settings = base_config_settings.copy()
            config_settings[symbol] = True
            handle_exclusive_groups(config_settings, symbol)
            turn_off_dependencies(config_settings)
            job = Job(description, config_settings, commands)
            self.jobs.append(job)

class ComplementaryDomain(BaseDomain): # pylint: disable=too-few-public-methods
    """A domain consisting of a set of loosely-related settings.
    Establish a list of configuration symbols. For each symbol, run a test job
    with this symbol unset.
    If exclude is a regular expression, skip generated jobs whose description
    would match this regular expression."""
    def __init__(self, symbols, commands, exclude=None):
        """Build a domain for the specified list of configuration symbols.
        Each job in the domain disables one of the specified symbols.
        Each job runs the specified commands."""
        super().__init__(symbols, commands, exclude)
        for symbol in symbols:
            description = '!' + symbol
            if exclude and re.match(exclude, description):
                continue
            config_settings = {symbol: False}
            turn_off_dependencies(config_settings)
            job = Job(description, config_settings, commands)
            self.jobs.append(job)

class DualDomain(ExclusiveDomain, ComplementaryDomain): # pylint: disable=too-few-public-methods
    """A domain that contains both the ExclusiveDomain and ComplementaryDomain tests.
    Both parent class __init__ calls are performed in any order and
    each call adds its respective jobs. The job array initialization is done once
    in BaseDomain, before the parent __init__ calls."""

class CipherInfo: # pylint: disable=too-few-public-methods
    """Collect data about cipher.h."""
    def __init__(self):
        self.base_symbols = set()
        with open('include/mbedtls/cipher.h', encoding="utf-8") as fh:
            for line in fh:
                m = re.match(r' *MBEDTLS_CIPHER_ID_(\w+),', line)
                if m and m.group(1) not in ['NONE', 'NULL', '3DES']:
                    self.base_symbols.add('MBEDTLS_' + m.group(1) + '_C')

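# For instance, the cipher.h line "    MBEDTLS_CIPHER_ID_AES," yields the
# configuration symbol MBEDTLS_AES_C:
#
#     m = re.match(r' *MBEDTLS_CIPHER_ID_(\w+),', "    MBEDTLS_CIPHER_ID_AES,")
#     'MBEDTLS_' + m.group(1) + '_C'   # -> 'MBEDTLS_AES_C'
#
# NONE, NULL and 3DES are skipped because the derived MBEDTLS_<id>_C name
# would not be a real configuration option for them.
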
class DomainData:
    """A container for domains and jobs, used to structure testing."""
    def config_symbols_matching(self, regexp):
        """List the mbedtls_config.h settings matching regexp."""
        return [symbol for symbol in self.all_config_symbols
                if re.match(regexp, symbol)]

    def __init__(self, options, conf):
        """Gather data about the library and establish a list of domains to test."""
        build_command = [options.make_command, 'CFLAGS=-Werror -O2']
        build_and_test = [build_command, [options.make_command, 'test']]
        self.all_config_symbols = set(conf.settings.keys())
        # Find hash modules by name.
        hash_symbols = self.config_symbols_matching(r'MBEDTLS_(MD|RIPEMD|SHA)[0-9]+_C\Z')
        # Find elliptic curve enabling macros by name.
        curve_symbols = self.config_symbols_matching(r'MBEDTLS_ECP_DP_\w+_ENABLED\Z')
        # Find key exchange enabling macros by name.
        key_exchange_symbols = self.config_symbols_matching(r'MBEDTLS_KEY_EXCHANGE_\w+_ENABLED\Z')
        # Find cipher IDs (block permutations and stream ciphers --- chaining
        # and padding modes are exercised separately) information by parsing
        # cipher.h, as the information is not readily available in mbedtls_config.h.
        cipher_info = CipherInfo()
        # Find block cipher chaining and padding mode enabling macros by name.
        cipher_chaining_symbols = self.config_symbols_matching(r'MBEDTLS_CIPHER_MODE_\w+\Z')
        cipher_padding_symbols = self.config_symbols_matching(r'MBEDTLS_CIPHER_PADDING_\w+\Z')
        self.domains = {
            # Cipher IDs, chaining modes and padding modes. Run the test suites.
            'cipher_id': ExclusiveDomain(cipher_info.base_symbols,
                                         build_and_test),
            'cipher_chaining': ExclusiveDomain(cipher_chaining_symbols,
                                               build_and_test),
            'cipher_padding': ExclusiveDomain(cipher_padding_symbols,
                                              build_and_test),
            # Elliptic curves. Run the test suites.
            'curves': ExclusiveDomain(curve_symbols, build_and_test),
            # Hash algorithms. Excluding exclusive domains of MD, RIPEMD, SHA1,
            # SHA224 and SHA384 because MBEDTLS_ENTROPY_C is extensively used
            # across various modules, but it depends on either SHA256 or SHA512.
            # As a consequence an "exclusive" test of anything other than SHA256
            # or SHA512 with MBEDTLS_ENTROPY_C enabled is not possible.
            'hashes': DualDomain(hash_symbols, build_and_test,
                                 exclude=r'MBEDTLS_(MD|RIPEMD|SHA1_)' \
                                          '|MBEDTLS_SHA224_' \
                                          '|MBEDTLS_SHA384_' \
                                          '|MBEDTLS_SHA3_'),
            # Key exchange types.
            'kex': ExclusiveDomain(key_exchange_symbols, build_and_test),
            'pkalgs': ComplementaryDomain(['MBEDTLS_ECDSA_C',
                                           'MBEDTLS_ECP_C',
                                           'MBEDTLS_PKCS1_V21',
                                           'MBEDTLS_PKCS1_V15',
                                           'MBEDTLS_RSA_C',
                                           'MBEDTLS_X509_RSASSA_PSS_SUPPORT'],
                                          build_and_test),
        }
        self.jobs = {}
        for domain in self.domains.values():
            for job in domain.jobs:
                self.jobs[job.name] = job

    def get_jobs(self, name):
        """Return the list of jobs identified by the given name.
        A name can either be the name of a domain or the name of one specific job."""
        if name in self.domains:
            return sorted(self.domains[name].jobs, key=lambda job: job.name)
        else:
            return [self.jobs[name]]

def run(options, job, conf, colors=NO_COLORS):
    """Run the specified job (a Job instance)."""
    subprocess.check_call([options.make_command, 'clean'])
    job.announce(colors, None)
    if not job.configure(conf, options, colors):
        job.announce(colors, False)
        return False
    conf.write()
    success = job.test(options)
    job.announce(colors, success)
    return success

def run_tests(options, domain_data, conf):
    """Run the desired jobs.
    domain_data should be a DomainData instance that describes the available
    domains and jobs.
    Run the jobs listed in options.tasks."""
    if not hasattr(options, 'config_backup'):
        options.config_backup = options.config + '.bak'
    colors = Colors(options)
    jobs = []
    failures = []
    successes = []
    for name in options.tasks:
        jobs += domain_data.get_jobs(name)
    backup_config(options)
    try:
        for job in jobs:
            success = run(options, job, conf, colors=colors)
            if not success:
                if options.keep_going:
                    failures.append(job.name)
                else:
                    return False
            else:
                successes.append(job.name)
        restore_config(options)
    except:
        # Restore the configuration, except in stop-on-error mode if there
        # was an error, where we leave the failing configuration up for
        # developer convenience.
        if options.keep_going:
            restore_config(options)
        raise
    if successes:
        log_line('{} passed'.format(' '.join(successes)), color=colors.bold_green)
    if failures:
        log_line('{} FAILED'.format(' '.join(failures)), color=colors.bold_red)
        return False
    else:
        return True

def main():
    try:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=
            "Test Mbed TLS with a subset of algorithms.\n\n"
            "Example usage:\n"
            r"./tests/scripts/depends.py \!MBEDTLS_SHA1_C MBEDTLS_SHA256_C""\n"
            "./tests/scripts/depends.py MBEDTLS_AES_C hashes\n"
            "./tests/scripts/depends.py cipher_id cipher_chaining\n")
        parser.add_argument('--color', metavar='WHEN',
                            help='Colorize the output (always/auto/never)',
                            choices=['always', 'auto', 'never'], default='auto')
        parser.add_argument('-c', '--config', metavar='FILE',
                            help='Configuration file to modify',
                            default='include/mbedtls/mbedtls_config.h')
        parser.add_argument('-C', '--directory', metavar='DIR',
                            help='Change to this directory before anything else',
                            default='.')
        parser.add_argument('-k', '--keep-going',
                            help='Try all configurations even if some fail (default)',
                            action='store_true', dest='keep_going', default=True)
        parser.add_argument('-e', '--no-keep-going',
                            help='Stop as soon as a configuration fails',
                            action='store_false', dest='keep_going')
        parser.add_argument('--list-jobs',
                            help='List supported jobs and exit',
                            action='append_const', dest='list', const='jobs')
        parser.add_argument('--list-domains',
                            help='List supported domains and exit',
                            action='append_const', dest='list', const='domains')
        parser.add_argument('--make-command', metavar='CMD',
                            help='Command to run instead of make (e.g. gmake)',
                            action='store', default='make')
        parser.add_argument('--unset-use-psa',
                            help='Unset MBEDTLS_USE_PSA_CRYPTO before any test',
                            action='store_true', dest='unset_use_psa')
        parser.add_argument('tasks', metavar='TASKS', nargs='*',
                            help='The domain(s) or job(s) to test (default: all).',
                            default=True)
        options = parser.parse_args()
        os.chdir(options.directory)
        conf = config.ConfigFile(options.config)
        domain_data = DomainData(options, conf)

        if options.tasks is True:
            options.tasks = sorted(domain_data.domains.keys())
        if options.list:
            for arg in options.list:
                for domain_name in sorted(getattr(domain_data, arg).keys()):
                    print(domain_name)
            sys.exit(0)
        else:
            sys.exit(0 if run_tests(options, domain_data, conf) else 1)
    except Exception: # pylint: disable=broad-except
        traceback.print_exc()
        sys.exit(3)

if __name__ == '__main__':
    main()
90
externals/mbedtls/tests/scripts/docker_env.sh
vendored
Executable file
@@ -0,0 +1,90 @@
#!/bin/bash -eu

# docker_env.sh
#
# Purpose
# -------
#
# This is a helper script to enable running tests under a Docker container,
# thus making it easier to get set up as well as isolating test dependencies
# (which include legacy/insecure configurations of openssl and gnutls).
#
# WARNING: the Dockerfile used by this script is no longer maintained! See
# https://github.com/Mbed-TLS/mbedtls-test/blob/master/README.md#quick-start
# for the set of Docker images we use on the CI.
#
# Notes for users
# ---------------
# This script expects a Linux x86_64 system with a recent version of Docker
# installed and available for use, as well as http/https access. If a proxy
# server must be used, invoke this script with the usual environment variables
# (http_proxy and https_proxy) set appropriately. If an alternate Docker
# registry is needed, specify MBEDTLS_DOCKER_REGISTRY to point at the
# host name.
#
#
# Running this script directly will check for Docker availability and set up
# the Docker image.

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later


# default values, can be overridden by the environment
: ${MBEDTLS_DOCKER_GUEST:=bionic}


DOCKER_IMAGE_TAG="armmbed/mbedtls-test:${MBEDTLS_DOCKER_GUEST}"

# Make sure docker is available
if ! which docker > /dev/null; then
    echo "Docker is required but doesn't seem to be installed. See https://www.docker.com/ to get started"
    exit 1
fi

# Figure out if we need to 'sudo docker'
if groups | grep docker > /dev/null; then
    DOCKER="docker"
else
    echo "Using sudo to invoke docker since you're not a member of the docker group..."
    DOCKER="sudo docker"
fi

# Figure out the number of processors available
if [ "$(uname)" == "Darwin" ]; then
    NUM_PROC="$(sysctl -n hw.logicalcpu)"
else
    NUM_PROC="$(nproc)"
fi

# Build the Docker image
echo "Getting docker image up to date (this may take a few minutes)..."
${DOCKER} image build \
    -t ${DOCKER_IMAGE_TAG} \
    --cache-from=${DOCKER_IMAGE_TAG} \
    --build-arg MAKEFLAGS_PARALLEL="-j ${NUM_PROC}" \
    --network host \
    ${http_proxy+--build-arg http_proxy=${http_proxy}} \
    ${https_proxy+--build-arg https_proxy=${https_proxy}} \
    ${MBEDTLS_DOCKER_REGISTRY+--build-arg MY_REGISTRY="${MBEDTLS_DOCKER_REGISTRY}/"} \
    tests/docker/${MBEDTLS_DOCKER_GUEST}

run_in_docker()
{
    ENV_ARGS=""
    while [ "$1" == "-e" ]; do
        ENV_ARGS="${ENV_ARGS} $1 $2"
        shift 2
    done

    ${DOCKER} container run -it --rm \
        --cap-add SYS_PTRACE \
        --user "$(id -u):$(id -g)" \
        --volume $PWD:$PWD \
        --workdir $PWD \
        -e MAKEFLAGS \
        -e PYLINTHOME=/tmp/.pylintd \
        ${ENV_ARGS} \
        ${DOCKER_IMAGE_TAG} \
        $@
}
32
externals/mbedtls/tests/scripts/doxygen.sh
vendored
Executable file
@@ -0,0 +1,32 @@
#!/bin/sh

# Make sure the doxygen documentation builds without warnings
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

# Abort on errors (and uninitialised variables)
set -eu

if [ -d library -a -d include -a -d tests ]; then :; else
    echo "Must be run from Mbed TLS root" >&2
    exit 1
fi

if scripts/apidoc_full.sh > doc.out 2>doc.err; then :; else
    cat doc.err
    echo "FAIL" >&2
    exit 1;
fi

cat doc.out doc.err | \
    grep -v "warning: ignoring unsupported tag" \
    > doc.filtered

if grep -E "(warning|error):" doc.filtered; then
    echo "FAIL" >&2
    exit 1;
fi

make apidoc_clean
rm -f doc.out doc.err doc.filtered
96
externals/mbedtls/tests/scripts/gen_ctr_drbg.pl
vendored
Executable file
@@ -0,0 +1,96 @@
#!/usr/bin/env perl
#
# Based on the NIST CTR_DRBG.rsp validation file.
# Only uses AES-256-CTR cases that use a derivation function,
# and concatenates nonce and personalization for initialization.
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    my $line = <TEST_DATA>;
    ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);

    return $val;
}

sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    next if ($line !~ /^\[AES-256 use df/);

    my $PredictionResistanceStr = get_suite_val("PredictionResistance");
    my $PredictionResistance = 0;
    $PredictionResistance = 1 if ($PredictionResistanceStr eq 'True');
    my $EntropyInputLen = get_suite_val("EntropyInputLen");
    my $NonceLen = get_suite_val("NonceLen");
    my $PersonalizationStringLen = get_suite_val("PersonalizationStringLen");
    my $AdditionalInputLen = get_suite_val("AdditionalInputLen");

    for ($cnt = 0; $cnt < 15; $cnt++)
    {
        my $Count = get_val("COUNT");
        my $EntropyInput = get_val("EntropyInput");
        my $Nonce = get_val("Nonce");
        my $PersonalizationString = get_val("PersonalizationString");
        my $AdditionalInput1 = get_val("AdditionalInput");
        my $EntropyInputPR1 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
        my $EntropyInputReseed = get_val("EntropyInputReseed") if ($PredictionResistance == 0);
        my $AdditionalInputReseed = get_val("AdditionalInputReseed") if ($PredictionResistance == 0);
        my $AdditionalInput2 = get_val("AdditionalInput");
        my $EntropyInputPR2 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
        my $ReturnedBits = get_val("ReturnedBits");

        if ($PredictionResistance == 1)
        {
            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
            print("ctr_drbg_validate_pr");
            print(":\"$Nonce$PersonalizationString\"");
            print(":\"$EntropyInput$EntropyInputPR1$EntropyInputPR2\"");
            print(":\"$AdditionalInput1\"");
            print(":\"$AdditionalInput2\"");
            print(":\"$ReturnedBits\"");
            print("\n\n");
        }
        else
        {
            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
            print("ctr_drbg_validate_nopr");
            print(":\"$Nonce$PersonalizationString\"");
            print(":\"$EntropyInput$EntropyInputReseed\"");
            print(":\"$AdditionalInput1\"");
            print(":\"$AdditionalInputReseed\"");
            print(":\"$AdditionalInput2\"");
            print(":\"$ReturnedBits\"");
            print("\n\n");
        }
    }
}
close(TEST_DATA);
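
To make the transformation concrete, here is a sketch of the expected input
and output shapes, derived from the parsing code above (the hex values are
invented and heavily abbreviated; real CTR_DRBG.rsp vectors are much longer).
Given an .rsp block such as:

    [AES-256 use df]
    [PredictionResistance = False]
    [EntropyInputLen = 256]
    [NonceLen = 128]
    [PersonalizationStringLen = 0]
    [AdditionalInputLen = 0]

    COUNT = 0
    EntropyInput = 0a0b0c
    Nonce = 010203
    PersonalizationString =
    AdditionalInput =
    EntropyInputReseed = 1a1b1c
    AdditionalInputReseed =
    AdditionalInput =
    ReturnedBits = f0f1f2

the script emits one .data test case, concatenating the nonce with the
personalization string and the entropy input with the reseed entropy:

    CTR_DRBG NIST Validation (AES-256 use df,False,256,128,0,0) #0
    ctr_drbg_validate_nopr:"010203":"0a0b0c1a1b1c":"":"":"":"f0f1f2"
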
101
externals/mbedtls/tests/scripts/gen_gcm_decrypt.pl
vendored
Executable file
@@ -0,0 +1,101 @@
#!/usr/bin/env perl
#
# Based on the NIST gcmDecryptxxx.rsp validation files.
# Only the first 3 cases of every set are used, to save compile time.
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if ($line !~ /^\[/);
        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
        last;
    }

    return $val;
}

sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

sub get_val_or_fail($)
{
    my $name = shift;
    my $val = "FAIL";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/ && $line !~ /FAIL/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/) if ($line =~ /=/);

    return $val;
}

my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    my $key_len = get_suite_val("Keylen");
    next if ($key_len !~ /\d+/);
    my $iv_len = get_suite_val("IVlen");
    my $pt_len = get_suite_val("PTlen");
    my $add_len = get_suite_val("AADlen");
    my $tag_len = get_suite_val("Taglen");

    for ($cnt = 0; $cnt < 3; $cnt++)
    {
        my $Count = get_val("Count");
        my $key = get_val("Key");
        my $iv = get_val("IV");
        my $ct = get_val("CT");
        my $add = get_val("AAD");
        my $tag = get_val("Tag");
        my $pt = get_val_or_fail("PT");

        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
        print("gcm_decrypt_and_verify");
        print(":\"$key\"");
        print(":\"$ct\"");
        print(":\"$iv\"");
        print(":\"$add\"");
        print(":$tag_len");
        print(":\"$tag\"");
        print(":\"$pt\"");
        print(":0");
        print("\n\n");
    }
}

print("GCM Selftest\n");
print("gcm_selftest:\n\n");

close(TEST_DATA);
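
One detail worth noting: in the NIST decrypt vectors, cases where
authentication must fail carry a line reading "FAIL" instead of "PT = ...".
get_val_or_fail() keeps its default value in that case, so the generated
test line ends in a literal "FAIL" plaintext field, schematically (the
angle-bracket placeholders stand for hex strings):

    gcm_decrypt_and_verify:"<key>":"<ct>":"<iv>":"<aad>":128:"<tag>":"FAIL":0
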
84
externals/mbedtls/tests/scripts/gen_gcm_encrypt.pl
vendored
Executable file
@@ -0,0 +1,84 @@
#!/usr/bin/env perl
#
# Based on the NIST gcmEncryptIntIVxxx.rsp validation files.
# Only the first 3 cases of every set are used, to save compile time.
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

sub get_suite_val($)
{
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if ($line !~ /^\[/);
        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
        last;
    }

    return $val;
}

sub get_val($)
{
    my $name = shift;
    my $val = "";
    my $line;

    while($line = <TEST_DATA>)
    {
        next if($line !~ /=/);
        last;
    }

    ($val) = ($line =~ /^$name = (\w+)/);

    return $val;
}

my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    my $key_len = get_suite_val("Keylen");
    next if ($key_len !~ /\d+/);
    my $iv_len = get_suite_val("IVlen");
    my $pt_len = get_suite_val("PTlen");
    my $add_len = get_suite_val("AADlen");
    my $tag_len = get_suite_val("Taglen");

    for ($cnt = 0; $cnt < 3; $cnt++)
    {
        my $Count = get_val("Count");
        my $key = get_val("Key");
        my $pt = get_val("PT");
        my $add = get_val("AAD");
        my $iv = get_val("IV");
        my $ct = get_val("CT");
        my $tag = get_val("Tag");

        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
        print("gcm_encrypt_and_tag");
        print(":\"$key\"");
        print(":\"$pt\"");
        print(":\"$iv\"");
        print(":\"$add\"");
        print(":\"$ct\"");
        print(":$tag_len");
        print(":\"$tag\"");
        print(":0");
        print("\n\n");
    }
}

print("GCM Selftest\n");
print("gcm_selftest:\n\n");

close(TEST_DATA);
74
externals/mbedtls/tests/scripts/gen_pkcs1_v21_sign_verify.pl
vendored
Executable file
@@ -0,0 +1,74 @@
#!/usr/bin/env perl
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use strict;

my $file = shift;

open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";

sub get_val($$)
{
    my $str = shift;
    my $name = shift;
    my $val = "";

    while(my $line = <TEST_DATA>)
    {
        next if($line !~ /^# $str/);
        last;
    }

    while(my $line = <TEST_DATA>)
    {
        last if($line eq "\r\n");
        $val .= $line;
    }

    $val =~ s/[ \r\n]//g;

    return $val;
}

my $state = 0;
my $val_n = "";
my $val_e = "";
my $val_p = "";
my $val_q = "";
my $mod = 0;
my $cnt = 1;
while (my $line = <TEST_DATA>)
{
    next if ($line !~ /^# Example/);

    ( $mod ) = ($line =~ /A (\d+)/);
    $val_n = get_val("RSA modulus n", "N");
    $val_e = get_val("RSA public exponent e", "E");
    $val_p = get_val("Prime p", "P");
    $val_q = get_val("Prime q", "Q");

    for(my $i = 1; $i <= 6; $i++)
    {
        my $val_m = get_val("Message to be", "M");
        my $val_salt = get_val("Salt", "Salt");
        my $val_sig = get_val("Signature", "Sig");

        print("RSASSA-PSS Signature Example ${cnt}_${i}\n");
        print("pkcs1_rsassa_pss_sign:$mod:16:\"$val_p\":16:\"$val_q\":16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
        print(":\"$val_m\"");
        print(":\"$val_salt\"");
        print(":\"$val_sig\":0");
        print("\n\n");

        print("RSASSA-PSS Signature Example ${cnt}_${i} (verify)\n");
        print("pkcs1_rsassa_pss_verify:$mod:16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
        print(":\"$val_m\"");
        print(":\"$val_salt\"");
        print(":\"$val_sig\":0");
        print("\n\n");
    }
    $cnt++;
}
close(TEST_DATA);
71
externals/mbedtls/tests/scripts/generate-afl-tests.sh
vendored
Executable file
@@ -0,0 +1,71 @@
#!/bin/sh

# This script splits the data test files containing the test cases into
# individual files (one test case per file) suitable for use with afl
# (American Fuzzy Lop). http://lcamtuf.coredump.cx/afl/
#
# Usage: generate-afl-tests.sh <test data file path>
#  <test data file path> - should be the path to one of the test suite files
#                          such as 'test_suite_rsa.data'
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

# Abort on errors
set -e

if [ -z $1 ]
then
    echo " [!] No test file specified" >&2
    echo "Usage: $0 <test data file>" >&2
    exit 1
fi

SRC_FILEPATH=$(dirname $1)/$(basename $1)
TESTSUITE=$(basename $1 .data)

THIS_DIR=$(basename $PWD)

if [ -d ../library -a -d ../include -a -d ../tests -a $THIS_DIR == "tests" ];
then :;
else
    echo " [!] Must be run from Mbed TLS tests directory" >&2
    exit 1
fi

DEST_TESTCASE_DIR=$TESTSUITE-afl-tests
DEST_OUTPUT_DIR=$TESTSUITE-afl-out

echo " [+] Creating output directories" >&2

if [ -e $DEST_OUTPUT_DIR/* ];
then :
    echo " [!] Test output files already exist." >&2
    exit 1
else
    mkdir -p $DEST_OUTPUT_DIR
fi

if [ -e $DEST_TESTCASE_DIR/* ];
then :
    echo " [!] Test case files already exist." >&2
else
    mkdir -p $DEST_TESTCASE_DIR
fi

echo " [+] Creating test cases" >&2
cd $DEST_TESTCASE_DIR

split -p '^\s*$' ../$SRC_FILEPATH
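
# Note: `split -p` (split input at lines matching a pattern) is a BSD
# extension and is not available in GNU coreutils, where `csplit` is the
# closest equivalent. Test cases in the .data file are separated by blank
# lines, so this produces one chunk per test case.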

for f in *;
do
    # Strip out any blank lines (no trim on OS X)
    sed '/^\s*$/d' $f >testcase_$f
    rm $f
done

cd ..

echo " [+] Test cases in $DEST_TESTCASE_DIR" >&2

187
externals/mbedtls/tests/scripts/generate_bignum_tests.py
vendored
Executable file
@@ -0,0 +1,187 @@
#!/usr/bin/env python3
"""Generate test data for bignum functions.

With no arguments, generate all test data. With non-option arguments,
generate only the specified files.

Class structure:

Child classes of test_data_generation.BaseTarget (file targets) represent an output
file. These indicate where test cases will be written to, for all subclasses of
this target. Multiple file targets should not reuse a `target_basename`.

Each subclass derived from a file target can either be:
  - A concrete class, representing a test function, which generates test cases.
  - An abstract class containing shared methods and attributes, not associated
    with a test function. An example is BignumOperation, which provides
    common features used for bignum binary operations.

Both concrete and abstract subclasses can be derived from, to implement
additional test cases (see BignumCmp and BignumCmpAbs for examples of deriving
from abstract and concrete classes).


Adding test case generation for a function:

A subclass representing the test function should be added, deriving from a
file target such as BignumTarget. This test class must set/implement the
following:
  - test_function: the function name from the associated .function file.
  - test_name: a descriptive name or brief summary to refer to the test
    function.
  - arguments(): a method to generate the list of arguments required for the
    test_function.
  - generate_function_tests(): a method to generate TestCases for the function.
    This should create instances of the class with required input data, and
    call `.create_test_case()` to yield the TestCase.

Additional details and other attributes/methods are given in the documentation
of BaseTarget in test_data_generation.py.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import sys

from abc import ABCMeta
from typing import List

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import test_data_generation
from mbedtls_dev import bignum_common
# Import modules containing additional test classes
# Test function classes in these modules will be registered by
# the framework
from mbedtls_dev import bignum_core, bignum_mod_raw, bignum_mod # pylint: disable=unused-import

class BignumTarget(test_data_generation.BaseTarget):
    #pylint: disable=too-few-public-methods
    """Target for bignum (legacy) test case generation."""
    target_basename = 'test_suite_bignum.generated'


class BignumOperation(bignum_common.OperationCommon, BignumTarget,
                      metaclass=ABCMeta):
    #pylint: disable=abstract-method
    """Common features for bignum operations in legacy tests."""
    unique_combinations_only = True
    input_values = [
        "", "0", "-", "-0",
        "7b", "-7b",
        "0000000000000000123", "-0000000000000000123",
        "1230000000000000000", "-1230000000000000000"
    ]

    def description_suffix(self) -> str:
        #pylint: disable=no-self-use # derived classes need self
        """Text to add at the end of the test case description."""
        return ""

    def description(self) -> str:
        """Generate a description for the test case.

        If not set, case_description uses the form A `symbol` B, where symbol
        is used to represent the operation. Descriptions of each value are
        generated to provide some context to the test case.
        """
        if not self.case_description:
            self.case_description = "{} {} {}".format(
                self.value_description(self.arg_a),
                self.symbol,
                self.value_description(self.arg_b)
            )
            description_suffix = self.description_suffix()
            if description_suffix:
                self.case_description += " " + description_suffix
        return super().description()

    @staticmethod
    def value_description(val) -> str:
        """Generate a description of the argument val.

        This produces a simple description of the value, which is used in test
        case naming to add context.
        """
        if val == "":
            return "0 (null)"
        if val == "-":
            return "negative 0 (null)"
        if val == "0":
            return "0 (1 limb)"

        if val[0] == "-":
            tmp = "negative"
            val = val[1:]
        else:
            tmp = "positive"
        if val[0] == "0":
            tmp += " with leading zero limb"
        elif len(val) > 10:
            tmp = "large " + tmp
        return tmp


class BignumCmp(BignumOperation):
    """Test cases for bignum value comparison."""
    count = 0
    test_function = "mpi_cmp_mpi"
    test_name = "MPI compare"
    input_cases = [
        ("-2", "-3"),
        ("-2", "-2"),
        ("2b4", "2b5"),
        ("2b5", "2b6")
    ]

    def __init__(self, val_a, val_b) -> None:
        super().__init__(val_a, val_b)
        self._result = int(self.int_a > self.int_b) - int(self.int_a < self.int_b)
        self.symbol = ["<", "==", ">"][self._result + 1]

    def result(self) -> List[str]:
        return [str(self._result)]


class BignumCmpAbs(BignumCmp):
    """Test cases for absolute bignum value comparison."""
    count = 0
    test_function = "mpi_cmp_abs"
    test_name = "MPI compare (abs)"

    def __init__(self, val_a, val_b) -> None:
        super().__init__(val_a.strip("-"), val_b.strip("-"))


class BignumAdd(BignumOperation):
    """Test cases for bignum value addition."""
    count = 0
    symbol = "+"
    test_function = "mpi_add_mpi"
    test_name = "MPI add"
    input_cases = bignum_common.combination_pairs(
        [
            "1c67967269c6", "9cde3",
            "-1c67967269c6", "-9cde3",
        ]
    )

    def __init__(self, val_a: str, val_b: str) -> None:
        super().__init__(val_a, val_b)
        self._result = self.int_a + self.int_b

    def description_suffix(self) -> str:
        if (self.int_a >= 0 and self.int_b >= 0):
            return "" # obviously positive result or 0
        if (self.int_a <= 0 and self.int_b <= 0):
            return "" # obviously negative result or 0
        # The sign of the result is not obvious, so indicate it
        return ", result{}0".format('>' if self._result > 0 else
                                    '<' if self._result < 0 else '=')

    def result(self) -> List[str]:
        return [bignum_common.quote_str("{:x}".format(self._result))]

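
# Illustrative sketch, not part of the upstream script: a minimal concrete
# class following the "Adding test case generation for a function" recipe in
# the module docstring, mirroring BignumAdd above. The name BignumSub and the
# target test function mpi_sub_mpi are assumptions made for illustration.
class BignumSub(BignumOperation):
    """Test cases for bignum value subtraction (illustrative sketch)."""
    count = 0
    symbol = "-"
    test_function = "mpi_sub_mpi"
    test_name = "MPI sub"

    def __init__(self, val_a: str, val_b: str) -> None:
        super().__init__(val_a, val_b)
        # OperationCommon parses the hex string inputs into int_a/int_b.
        self._result = self.int_a - self.int_b

    def result(self) -> List[str]:
        return [bignum_common.quote_str("{:x}".format(self._result))]
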
if __name__ == '__main__':
    # Use the section of the docstring relevant to the CLI as description
    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
22
externals/mbedtls/tests/scripts/generate_ecp_tests.py
vendored
Executable file
@@ -0,0 +1,22 @@
#!/usr/bin/env python3
"""Generate test data for ecp functions.

The command line usage, class structure and available methods are the same
as in generate_bignum_tests.py.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import sys

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import test_data_generation
# Import modules containing additional test classes
# Test function classes in these modules will be registered by
# the framework
from mbedtls_dev import ecp # pylint: disable=unused-import

if __name__ == '__main__':
    # Use the section of the docstring relevant to the CLI as description
    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
183
externals/mbedtls/tests/scripts/generate_pkcs7_tests.py
vendored
Executable file
@@ -0,0 +1,183 @@
#!/usr/bin/env python3
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#

"""
Fuzz-style test generation for the pkcs7 tests.
Given a valid DER pkcs7 file, add tests to the test_suite_pkcs7.data file.
 - It is expected that the pkcs7_asn1_fail( data_t *pkcs7_buf )
   function is defined in test_suite_pkcs7.function.
 - This is not meant to be portable code; if anything it is meant to serve as
   documentation for showing how those ugly tests in test_suite_pkcs7.data were created.
"""


import sys
from os.path import exists

PKCS7_TEST_FILE = "../suites/test_suite_pkcs7.data"

class Test: # pylint: disable=too-few-public-methods
    """
    An instance of a test in test_suite_pkcs7.data
    """
    def __init__(self, name, depends, func_call):
        self.name = name
        self.depends = depends
        self.func_call = func_call

    # pylint: disable=no-self-use
    def to_string(self):
        return "\n" + self.name + "\n" + self.depends + "\n" + self.func_call + "\n"

class TestData:
    """
    Take in the test_suite_pkcs7.data file.
    Allow new tests to be added.
    """
    mandatory_dep = "MBEDTLS_MD_CAN_SHA256"
    test_name = "PKCS7 Parse Failure Invalid ASN1"
    test_function = "pkcs7_asn1_fail:"
    def __init__(self, file_name):
        self.file_name = file_name
        self.last_test_num, self.old_tests = self.read_test_file(file_name)
        self.new_tests = []

    # pylint: disable=no-self-use
    def read_test_file(self, file):
        """
        Parse the test_suite_pkcs7.data file.
        """
        tests = []
        if not exists(file):
            print(file + " does not exist")
            sys.exit()
        with open(file, "r", encoding='UTF-8') as fp:
            data = fp.read()
        lines = [line.strip() for line in data.split('\n') if len(line.strip()) > 1]
        i = 0
        while i < len(lines):
            if "depends" in lines[i+1]:
                tests.append(Test(lines[i], lines[i+1], lines[i+2]))
                i += 3
            else:
                tests.append(Test(lines[i], None, lines[i+1]))
                i += 2
        latest_test_num = float(tests[-1].name.split('#')[1])
        return latest_test_num, tests

    def add(self, name, func_call):
        self.last_test_num += 1
        self.new_tests.append(Test(self.test_name + ": " + name + " #" + \
                str(self.last_test_num), "depends_on:" + self.mandatory_dep, \
                self.test_function + '"' + func_call + '"'))

    def write_changes(self):
        with open(self.file_name, 'a', encoding='UTF-8') as fw:
            fw.write("\n")
            for t in self.new_tests:
                fw.write(t.to_string())


def asn1_mutate(data):
    """
    We have been given an asn1 structure representing a pkcs7.
    We want to return an array of slightly modified versions of this data;
    they should be modified in a way which makes the structure invalid.

    We know that asn1 structures are:
    |---1 byte showing data type---|----byte(s) for length of data---|---data content--|
    We know that some data types can contain other data types.
    Return a list of (reason, mutated data) pairs.
    """

    # off the bat just add bytes to start and end of the buffer
    mutations = []
    reasons = []
    mutations.append(["00"] + data)
    reasons.append("Add null byte to start")
    mutations.append(data + ["00"])
    reasons.append("Add null byte to end")
    # for every asn1 entry we should attempt to:
    # - change the data type tag
    # - make the length longer than actual
    # - make the length shorter than actual
    i = 0
    while i < len(data):
        tag_i = i
        leng_i = tag_i + 1
        data_i = leng_i + 1 + (int(data[leng_i][1], 16) if data[leng_i][0] == '8' else 0)
        if data[leng_i][0] == '8':
            length = int(''.join(data[leng_i + 1: data_i]), 16)
        else:
            length = int(data[leng_i], 16)

        tag = data[tag_i]
        print("Looking at asn1: offset " + str(i) + " tag = " + tag + \
                ", length = " + str(length) + ":")
        print(''.join(data[data_i:data_i+length]))
        # change tag to something else
        if tag == "02":
            # turn integers into octet strings
            new_tag = "04"
        else:
            # turn everything else into an integer
            new_tag = "02"
        mutations.append(data[:tag_i] + [new_tag] + data[leng_i:])
        reasons.append("Change tag " + tag + " to " + new_tag)

        # change lengths to too big
        # skip any edge cases which would cause carry over
        if int(data[data_i - 1], 16) < 255:
            new_length = str(hex(int(data[data_i - 1], 16) + 1))[2:]
            if len(new_length) == 1:
                new_length = "0"+new_length
            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
            reasons.append("Change length from " + str(length) + " to " \
                    + str(length + 1))
            # we can add another test here for tags that contain other tags \
            # where they have more data than their containing tags account for
            if tag in ["30", "a0", "31"]:
                mutations.append(data[:data_i -1] + [new_length] + \
                        data[data_i:data_i + length] + ["00"] + \
                        data[data_i + length:])
                reasons.append("Change contents of tag " + tag + " to contain \
                        one unaccounted extra byte")
        # change lengths to too small
        if int(data[data_i - 1], 16) > 0:
            new_length = str(hex(int(data[data_i - 1], 16) - 1))[2:]
            if len(new_length) == 1:
                new_length = "0"+new_length
            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
            reasons.append("Change length from " + str(length) + " to " + str(length - 1))

        # some tag types contain other tag types so we should iterate into the data
        if tag in ["30", "a0", "31"]:
            i = data_i
        else:
            i = data_i + length

    return list(zip(reasons, mutations))

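
# Quick illustration (not part of the upstream script; traced by hand): for a
# minimal INTEGER TLV ['02', '01', '05'], asn1_mutate returns five (reason,
# data) pairs: a null byte prepended and appended, the tag changed to 04, and
# the length byte bumped up to 02 and down to 00. (It also logs each entry
# it visits.)
assert len(asn1_mutate(['02', '01', '05'])) == 5
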
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("USAGE: " + sys.argv[0] + " <pkcs7_der_file>")
        sys.exit()

    DATA_FILE = sys.argv[1]
    TEST_DATA = TestData(PKCS7_TEST_FILE)
    with open(DATA_FILE, 'rb') as f:
        DATA_STR = f.read().hex()
    # make data an array of byte strings eg ['de','ad','be','ef']
    HEX_DATA = list(map(''.join, [[DATA_STR[i], DATA_STR[i+1]] for i in range(0, len(DATA_STR), \
            2)]))
    # returns tuples of test_names and modified data buffers
    MUT_ARR = asn1_mutate(HEX_DATA)

    print("made " + str(len(MUT_ARR)) + " new tests")
    for new_test in MUT_ARR:
        TEST_DATA.add(new_test[0], ''.join(new_test[1]))

    TEST_DATA.write_changes()
850
externals/mbedtls/tests/scripts/generate_psa_tests.py
vendored
Executable file
@@ -0,0 +1,850 @@
#!/usr/bin/env python3
"""Generate test data for PSA cryptographic mechanisms.

With no arguments, generate all test data. With non-option arguments,
generate only the specified files.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import enum
import re
import sys
from typing import Callable, Dict, FrozenSet, Iterable, Iterator, List, Optional

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import crypto_data_tests
from mbedtls_dev import crypto_knowledge
from mbedtls_dev import macro_collector #pylint: disable=unused-import
from mbedtls_dev import psa_information
from mbedtls_dev import psa_storage
from mbedtls_dev import test_case
from mbedtls_dev import test_data_generation


def test_case_for_key_type_not_supported(
        verb: str, key_type: str, bits: int,
        dependencies: List[str],
        *args: str,
        param_descr: str = ''
) -> test_case.TestCase:
    """Return one test case exercising a key creation method
    for an unsupported key type or size.
    """
    psa_information.hack_dependencies_not_implemented(dependencies)
    tc = test_case.TestCase()
    short_key_type = crypto_knowledge.short_expression(key_type)
    adverb = 'not' if dependencies else 'never'
    if param_descr:
        adverb = param_descr + ' ' + adverb
    tc.set_description('PSA {} {} {}-bit {} supported'
                       .format(verb, short_key_type, bits, adverb))
    tc.set_dependencies(dependencies)
    tc.set_function(verb + '_not_supported')
    tc.set_arguments([key_type] + list(args))
    return tc

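# For illustration (an assumed rendering, not output copied from a real run):
# one generated case lands in the .data file in roughly this shape, with the
# key material hex abbreviated:
#
#   PSA import RSA_KEY_PAIR 512-bit not supported
#   depends_on:!PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC
#   import_not_supported:PSA_KEY_TYPE_RSA_KEY_PAIR:"3082013b..."
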
class KeyTypeNotSupported:
    """Generate test cases for when a key type is not supported."""

    def __init__(self, info: psa_information.Information) -> None:
        self.constructors = info.constructors

    ALWAYS_SUPPORTED = frozenset([
        'PSA_KEY_TYPE_DERIVE',
        'PSA_KEY_TYPE_PASSWORD',
        'PSA_KEY_TYPE_PASSWORD_HASH',
        'PSA_KEY_TYPE_RAW_DATA',
        'PSA_KEY_TYPE_HMAC'
    ])
    def test_cases_for_key_type_not_supported(
            self,
            kt: crypto_knowledge.KeyType,
            param: Optional[int] = None,
            param_descr: str = '',
    ) -> Iterator[test_case.TestCase]:
        """Return test cases exercising key creation when the given type is unsupported.

        If param is present and not None, emit test cases conditioned on this
        parameter not being supported. If it is absent or None, emit test cases
        conditioned on the base type not being supported.
        """
        if kt.name in self.ALWAYS_SUPPORTED:
            # Don't generate test cases for key types that are always supported.
            # They would be skipped in all configurations, which is noise.
            return
        import_dependencies = [('!' if param is None else '') +
                               psa_information.psa_want_symbol(kt.name)]
        if kt.params is not None:
            import_dependencies += [('!' if param == i else '') +
                                    psa_information.psa_want_symbol(sym)
                                    for i, sym in enumerate(kt.params)]
        if kt.name.endswith('_PUBLIC_KEY'):
            generate_dependencies = []
        else:
            generate_dependencies = \
                psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
            import_dependencies = \
                psa_information.fix_key_pair_dependencies(import_dependencies, 'BASIC')
        for bits in kt.sizes_to_test():
            yield test_case_for_key_type_not_supported(
                'import', kt.expression, bits,
                psa_information.finish_family_dependencies(import_dependencies, bits),
                test_case.hex_string(kt.key_material(bits)),
                param_descr=param_descr,
            )
            if not generate_dependencies and param is not None:
                # If generation is impossible for this key type, rather than
                # supported or not depending on implementation capabilities,
                # only generate the test case once.
                continue
            # For public key we expect that key generation fails with
            # INVALID_ARGUMENT. It is handled by KeyGenerate class.
            if not kt.is_public():
                yield test_case_for_key_type_not_supported(
                    'generate', kt.expression, bits,
                    psa_information.finish_family_dependencies(generate_dependencies, bits),
                    str(bits),
                    param_descr=param_descr,
                )
        # To be added: derive

    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')

    def test_cases_for_not_supported(self) -> Iterator[test_case.TestCase]:
        """Generate test cases that exercise the creation of keys of unsupported types."""
        for key_type in sorted(self.constructors.key_types):
            if key_type in self.ECC_KEY_TYPES:
                continue
            if key_type in self.DH_KEY_TYPES:
                continue
            kt = crypto_knowledge.KeyType(key_type)
            yield from self.test_cases_for_key_type_not_supported(kt)
        for curve_family in sorted(self.constructors.ecc_curves):
            for constr in self.ECC_KEY_TYPES:
                kt = crypto_knowledge.KeyType(constr, [curve_family])
                yield from self.test_cases_for_key_type_not_supported(
                    kt, param_descr='type')
                yield from self.test_cases_for_key_type_not_supported(
                    kt, 0, param_descr='curve')
        for dh_family in sorted(self.constructors.dh_groups):
            for constr in self.DH_KEY_TYPES:
                kt = crypto_knowledge.KeyType(constr, [dh_family])
                yield from self.test_cases_for_key_type_not_supported(
                    kt, param_descr='type')
                yield from self.test_cases_for_key_type_not_supported(
                    kt, 0, param_descr='group')

def test_case_for_key_generation(
        key_type: str, bits: int,
        dependencies: List[str],
        *args: str,
        result: str = ''
) -> test_case.TestCase:
    """Return one test case exercising a key generation.
    """
    psa_information.hack_dependencies_not_implemented(dependencies)
    tc = test_case.TestCase()
    short_key_type = crypto_knowledge.short_expression(key_type)
    tc.set_description('PSA {} {}-bit'
                       .format(short_key_type, bits))
    tc.set_dependencies(dependencies)
    tc.set_function('generate_key')
    tc.set_arguments([key_type] + list(args) + [result])

    return tc

class KeyGenerate:
    """Generate positive and negative (invalid argument) test cases for key generation."""

    def __init__(self, info: psa_information.Information) -> None:
        self.constructors = info.constructors

    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')

    @staticmethod
    def test_cases_for_key_type_key_generation(
            kt: crypto_knowledge.KeyType
    ) -> Iterator[test_case.TestCase]:
        """Return test cases exercising key generation.

        All key types can be generated except for public keys. For public keys
        a PSA_ERROR_INVALID_ARGUMENT status is expected.
        """
        result = 'PSA_SUCCESS'

        import_dependencies = [psa_information.psa_want_symbol(kt.name)]
        if kt.params is not None:
            import_dependencies += [psa_information.psa_want_symbol(sym)
                                    for i, sym in enumerate(kt.params)]
        if kt.name.endswith('_PUBLIC_KEY'):
            # The library checks whether the key type is a public key generically,
            # before it reaches a point where it needs support for the specific key
            # type, so it returns INVALID_ARGUMENT for unsupported public key types.
            generate_dependencies = []
            result = 'PSA_ERROR_INVALID_ARGUMENT'
        else:
            generate_dependencies = \
                psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
        for bits in kt.sizes_to_test():
            if kt.name == 'PSA_KEY_TYPE_RSA_KEY_PAIR':
                size_dependency = "PSA_VENDOR_RSA_GENERATE_MIN_KEY_BITS <= " + str(bits)
                test_dependencies = generate_dependencies + [size_dependency]
            else:
                test_dependencies = generate_dependencies
            yield test_case_for_key_generation(
                kt.expression, bits,
                psa_information.finish_family_dependencies(test_dependencies, bits),
                str(bits),
                result
            )

    def test_cases_for_key_generation(self) -> Iterator[test_case.TestCase]:
        """Generate test cases that exercise the generation of keys."""
        for key_type in sorted(self.constructors.key_types):
            if key_type in self.ECC_KEY_TYPES:
                continue
            if key_type in self.DH_KEY_TYPES:
                continue
            kt = crypto_knowledge.KeyType(key_type)
            yield from self.test_cases_for_key_type_key_generation(kt)
        for curve_family in sorted(self.constructors.ecc_curves):
            for constr in self.ECC_KEY_TYPES:
                kt = crypto_knowledge.KeyType(constr, [curve_family])
                yield from self.test_cases_for_key_type_key_generation(kt)
        for dh_family in sorted(self.constructors.dh_groups):
            for constr in self.DH_KEY_TYPES:
                kt = crypto_knowledge.KeyType(constr, [dh_family])
                yield from self.test_cases_for_key_type_key_generation(kt)

class OpFail:
    """Generate test cases for operations that must fail."""
    #pylint: disable=too-few-public-methods

    class Reason(enum.Enum):
        NOT_SUPPORTED = 0
        INVALID = 1
        INCOMPATIBLE = 2
        PUBLIC = 3

    def __init__(self, info: psa_information.Information) -> None:
        self.constructors = info.constructors
        key_type_expressions = self.constructors.generate_expressions(
            sorted(self.constructors.key_types)
        )
        self.key_types = [crypto_knowledge.KeyType(kt_expr)
                          for kt_expr in key_type_expressions]

    def make_test_case(
            self,
            alg: crypto_knowledge.Algorithm,
            category: crypto_knowledge.AlgorithmCategory,
            reason: 'Reason',
            kt: Optional[crypto_knowledge.KeyType] = None,
            not_deps: FrozenSet[str] = frozenset(),
    ) -> test_case.TestCase:
        """Construct a failure test case for a one-key or keyless operation."""
        #pylint: disable=too-many-arguments,too-many-locals
        tc = test_case.TestCase()
        pretty_alg = alg.short_expression()
        if reason == self.Reason.NOT_SUPPORTED:
            short_deps = [re.sub(r'PSA_WANT_ALG_', r'', dep)
                          for dep in not_deps]
            pretty_reason = '!' + '&'.join(sorted(short_deps))
        else:
            pretty_reason = reason.name.lower()
        if kt:
            key_type = kt.expression
            pretty_type = kt.short_expression()
        else:
            key_type = ''
            pretty_type = ''
        tc.set_description('PSA {} {}: {}{}'
                           .format(category.name.lower(),
                                   pretty_alg,
                                   pretty_reason,
                                   ' with ' + pretty_type if pretty_type else ''))
        dependencies = psa_information.automatic_dependencies(alg.base_expression, key_type)
        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
        for i, dep in enumerate(dependencies):
            if dep in not_deps:
                dependencies[i] = '!' + dep
        tc.set_dependencies(dependencies)
        tc.set_function(category.name.lower() + '_fail')
        arguments = [] # type: List[str]
        if kt:
            key_material = kt.key_material(kt.sizes_to_test()[0])
            arguments += [key_type, test_case.hex_string(key_material)]
        arguments.append(alg.expression)
        if category.is_asymmetric():
            arguments.append('1' if reason == self.Reason.PUBLIC else '0')
        error = ('NOT_SUPPORTED' if reason == self.Reason.NOT_SUPPORTED else
                 'INVALID_ARGUMENT')
        arguments.append('PSA_ERROR_' + error)
        tc.set_arguments(arguments)
        return tc

    def no_key_test_cases(
            self,
            alg: crypto_knowledge.Algorithm,
            category: crypto_knowledge.AlgorithmCategory,
    ) -> Iterator[test_case.TestCase]:
        """Generate failure test cases for keyless operations with the specified algorithm."""
        if alg.can_do(category):
            # Compatible operation, unsupported algorithm
            for dep in psa_information.automatic_dependencies(alg.base_expression):
                yield self.make_test_case(alg, category,
                                          self.Reason.NOT_SUPPORTED,
                                          not_deps=frozenset([dep]))
        else:
            # Incompatible operation, supported algorithm
            yield self.make_test_case(alg, category, self.Reason.INVALID)

    def one_key_test_cases(
            self,
            alg: crypto_knowledge.Algorithm,
            category: crypto_knowledge.AlgorithmCategory,
    ) -> Iterator[test_case.TestCase]:
        """Generate failure test cases for one-key operations with the specified algorithm."""
        for kt in self.key_types:
            key_is_compatible = kt.can_do(alg)
            if key_is_compatible and alg.can_do(category):
                # Compatible key and operation, unsupported algorithm
                for dep in psa_information.automatic_dependencies(alg.base_expression):
                    yield self.make_test_case(alg, category,
                                              self.Reason.NOT_SUPPORTED,
                                              kt=kt, not_deps=frozenset([dep]))
                # Public key for a private-key operation
                if category.is_asymmetric() and kt.is_public():
                    yield self.make_test_case(alg, category,
                                              self.Reason.PUBLIC,
                                              kt=kt)
            elif key_is_compatible:
                # Compatible key, incompatible operation, supported algorithm
                yield self.make_test_case(alg, category,
                                          self.Reason.INVALID,
                                          kt=kt)
            elif alg.can_do(category):
                # Incompatible key, compatible operation, supported algorithm
                yield self.make_test_case(alg, category,
                                          self.Reason.INCOMPATIBLE,
                                          kt=kt)
            else:
                # Incompatible key and operation. Don't test cases where
                # multiple things are wrong, to keep the number of test
                # cases reasonable.
                pass

    def test_cases_for_algorithm(
            self,
            alg: crypto_knowledge.Algorithm,
    ) -> Iterator[test_case.TestCase]:
        """Generate operation failure test cases for the specified algorithm."""
        for category in crypto_knowledge.AlgorithmCategory:
            if category == crypto_knowledge.AlgorithmCategory.PAKE:
                # PAKE operations are not implemented yet
                pass
            elif category.requires_key():
                yield from self.one_key_test_cases(alg, category)
            else:
                yield from self.no_key_test_cases(alg, category)

    def all_test_cases(self) -> Iterator[test_case.TestCase]:
        """Generate all test cases for operations that must fail."""
        algorithms = sorted(self.constructors.algorithms)
        for expr in self.constructors.generate_expressions(algorithms):
            alg = crypto_knowledge.Algorithm(expr)
            yield from self.test_cases_for_algorithm(alg)


class StorageKey(psa_storage.Key):
    """Representation of a key for storage format testing."""

    IMPLICIT_USAGE_FLAGS = {
        'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
        'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE'
    } #type: Dict[str, str]
    """Mapping of usage flags to the flags that they imply."""

    def __init__(
            self,
            usage: Iterable[str],
            without_implicit_usage: Optional[bool] = False,
            **kwargs
    ) -> None:
        """Prepare to generate a key.

        * `usage`                 : The usage flags used for the key.
        * `without_implicit_usage`: Flag defining whether to apply the usage extension.
        """
        usage_flags = set(usage)
        if not without_implicit_usage:
            for flag in sorted(usage_flags):
                if flag in self.IMPLICIT_USAGE_FLAGS:
                    usage_flags.add(self.IMPLICIT_USAGE_FLAGS[flag])
        if usage_flags:
            usage_expression = ' | '.join(sorted(usage_flags))
        else:
            usage_expression = '0'
        super().__init__(usage=usage_expression, **kwargs)

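# For example (illustrative): constructing a StorageKey with
# usage=['PSA_KEY_USAGE_SIGN_HASH'] stores a usage field of
# PSA_KEY_USAGE_SIGN_HASH | PSA_KEY_USAGE_SIGN_MESSAGE, since the ability to
# sign a hash implies the ability to sign a message under the mapping above;
# passing without_implicit_usage=True suppresses this extension.
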
class StorageTestData(StorageKey):
    """Representation of test case data for storage format testing."""

    def __init__(
            self,
            description: str,
            expected_usage: Optional[List[str]] = None,
            **kwargs
    ) -> None:
        """Prepare to generate test data.

        * `description`   : used for the test case names.
        * `expected_usage`: the usage flags generated as the expected usage flags
                            in the test cases. Can differ from the usage flags
                            stored in the keys because of the usage flags extension.
        """
        super().__init__(**kwargs)
        self.description = description #type: str
        if expected_usage is None:
            self.expected_usage = self.usage #type: psa_storage.Expr
        elif expected_usage:
            self.expected_usage = psa_storage.Expr(' | '.join(expected_usage))
        else:
            self.expected_usage = psa_storage.Expr(0)

class StorageFormat:
    """Storage format stability test cases."""

    def __init__(self, info: psa_information.Information, version: int, forward: bool) -> None:
        """Prepare to generate test cases for storage format stability.

        * `info`: information about the API. See the `Information` class.
        * `version`: the storage format version to generate test cases for.
        * `forward`: if true, generate forward compatibility test cases which
          save a key and check that its representation is as intended. Otherwise
          generate backward compatibility test cases which inject a key
          representation and check that it can be read and used.
        """
        self.constructors = info.constructors #type: macro_collector.PSAMacroEnumerator
        self.version = version #type: int
        self.forward = forward #type: bool

    RSA_OAEP_RE = re.compile(r'PSA_ALG_RSA_OAEP\((.*)\)\Z')
    BRAINPOOL_RE = re.compile(r'PSA_KEY_TYPE_\w+\(PSA_ECC_FAMILY_BRAINPOOL_\w+\)\Z')
    @classmethod
    def exercise_key_with_algorithm(
            cls,
            key_type: psa_storage.Expr, bits: int,
            alg: psa_storage.Expr
    ) -> bool:
        """Whether to exercise the given key with the given algorithm.

        Normally only the type and algorithm matter for compatibility, and
        this is handled in crypto_knowledge.KeyType.can_do(). This function
        exists to detect exceptional cases. Exceptional cases detected here
        are not tested in OpFail and should therefore have manually written
        test cases.
        """
        # Some test keys have the RAW_DATA type and attributes that don't
        # necessarily make sense. We do this to validate numerical
        # encodings of the attributes.
        # Raw data keys have no useful exercise anyway so there is no
        # loss of test coverage.
        if key_type.string == 'PSA_KEY_TYPE_RAW_DATA':
            return False
        # OAEP requires room for two hashes plus wrapping
        m = cls.RSA_OAEP_RE.match(alg.string)
        if m:
            hash_alg = m.group(1)
            hash_length = crypto_knowledge.Algorithm.hash_length(hash_alg)
            key_length = (bits + 7) // 8
            # Leave enough room for at least one byte of plaintext
            return key_length > 2 * hash_length + 2
        # There's nothing wrong with ECC keys on Brainpool curves,
        # but operations with them are very slow. So we only exercise them
        # with a single algorithm, not with all possible hashes. We do
        # exercise other curves with all algorithms so test coverage is
        # perfectly adequate like this.
        m = cls.BRAINPOOL_RE.match(key_type.string)
        if m and alg.string != 'PSA_ALG_ECDSA_ANY':
            return False
        return True
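
    # Worked example (illustrative, not from the upstream file): for a
    # 1024-bit RSA key with PSA_ALG_RSA_OAEP(PSA_ALG_SHA_256), key_length is
    # (1024 + 7) // 8 = 128 bytes and hash_length is 32, so 128 > 2*32 + 2
    # holds and the key is exercised; a 528-bit key gives 66 > 66, which is
    # false, so that combination is skipped.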

    def make_test_case(self, key: StorageTestData) -> test_case.TestCase:
        """Construct a storage format test case for the given key.

        If ``forward`` is true, generate a forward compatibility test case:
        create a key and validate that it has the expected representation.
        Otherwise generate a backward compatibility test case: inject the
        key representation into storage and validate that it can be read
        correctly.
        """
        verb = 'save' if self.forward else 'read'
        tc = test_case.TestCase()
        tc.set_description(verb + ' ' + key.description)
        dependencies = psa_information.automatic_dependencies(
            key.lifetime.string, key.type.string,
            key.alg.string, key.alg2.string,
        )
        dependencies = psa_information.finish_family_dependencies(dependencies, key.bits)
        dependencies += psa_information.generate_deps_from_description(key.description)
        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
        tc.set_dependencies(dependencies)
        tc.set_function('key_storage_' + verb)
        if self.forward:
            extra_arguments = []
        else:
            flags = []
            if self.exercise_key_with_algorithm(key.type, key.bits, key.alg):
                flags.append('TEST_FLAG_EXERCISE')
            if 'READ_ONLY' in key.lifetime.string:
                flags.append('TEST_FLAG_READ_ONLY')
            extra_arguments = [' | '.join(flags) if flags else '0']
        tc.set_arguments([key.lifetime.string,
                          key.type.string, str(key.bits),
                          key.expected_usage.string,
                          key.alg.string, key.alg2.string,
                          '"' + key.material.hex() + '"',
                          '"' + key.hex() + '"',
                          *extra_arguments])
        return tc

    def key_for_lifetime(
            self,
            lifetime: str,
    ) -> StorageTestData:
        """Construct a test key for the given lifetime."""
        short = lifetime
        short = re.sub(r'PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION',
                       r'', short)
        short = crypto_knowledge.short_expression(short)
        description = 'lifetime: ' + short
        key = StorageTestData(version=self.version,
                              id=1, lifetime=lifetime,
                              type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                              usage=['PSA_KEY_USAGE_EXPORT'], alg=0, alg2=0,
                              material=b'L',
                              description=description)
        return key

    def all_keys_for_lifetimes(self) -> Iterator[StorageTestData]:
        """Generate test keys covering lifetimes."""
        lifetimes = sorted(self.constructors.lifetimes)
        expressions = self.constructors.generate_expressions(lifetimes)
        for lifetime in expressions:
            # Don't attempt to create or load a volatile key in storage
            if 'VOLATILE' in lifetime:
                continue
            # Don't attempt to create a read-only key in storage,
            # but do attempt to load one.
            if 'READ_ONLY' in lifetime and self.forward:
                continue
            yield self.key_for_lifetime(lifetime)

    def key_for_usage_flags(
            self,
            usage_flags: List[str],
            short: Optional[str] = None,
            test_implicit_usage: Optional[bool] = True
    ) -> StorageTestData:
        """Construct a test key for the given key usage."""
        extra_desc = ' without implication' if test_implicit_usage else ''
        description = 'usage' + extra_desc + ': '
        key1 = StorageTestData(version=self.version,
                               id=1, lifetime=0x00000001,
                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                               expected_usage=usage_flags,
                               without_implicit_usage=not test_implicit_usage,
                               usage=usage_flags, alg=0, alg2=0,
                               material=b'K',
                               description=description)
        if short is None:
            usage_expr = key1.expected_usage.string
            key1.description += crypto_knowledge.short_expression(usage_expr)
        else:
            key1.description += short
        return key1

    def generate_keys_for_usage_flags(self, **kwargs) -> Iterator[StorageTestData]:
        """Generate test keys covering usage flags."""
        known_flags = sorted(self.constructors.key_usage_flags)
        yield self.key_for_usage_flags(['0'], **kwargs)
        for usage_flag in known_flags:
            yield self.key_for_usage_flags([usage_flag], **kwargs)
        for flag1, flag2 in zip(known_flags,
                                known_flags[1:] + [known_flags[0]]):
            yield self.key_for_usage_flags([flag1, flag2], **kwargs)

    def generate_key_for_all_usage_flags(self) -> Iterator[StorageTestData]:
        known_flags = sorted(self.constructors.key_usage_flags)
        yield self.key_for_usage_flags(known_flags, short='all known')

    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
        yield from self.generate_keys_for_usage_flags()
        yield from self.generate_key_for_all_usage_flags()

    def key_for_type_and_alg(
            self,
            kt: crypto_knowledge.KeyType,
            bits: int,
            alg: Optional[crypto_knowledge.Algorithm] = None,
    ) -> StorageTestData:
        """Construct a test key of the given type.

        If alg is not None, this key allows it.
        """
        usage_flags = ['PSA_KEY_USAGE_EXPORT']
        alg1 = 0 #type: psa_storage.Exprable
        alg2 = 0
        if alg is not None:
            alg1 = alg.expression
            usage_flags += alg.usage_flags(public=kt.is_public())
        key_material = kt.key_material(bits)
        description = 'type: {} {}-bit'.format(kt.short_expression(1), bits)
        if alg is not None:
            description += ', ' + alg.short_expression(1)
        key = StorageTestData(version=self.version,
                              id=1, lifetime=0x00000001,
                              type=kt.expression, bits=bits,
                              usage=usage_flags, alg=alg1, alg2=alg2,
                              material=key_material,
                              description=description)
        return key

    def keys_for_type(
            self,
            key_type: str,
            all_algorithms: List[crypto_knowledge.Algorithm],
    ) -> Iterator[StorageTestData]:
        """Generate test keys for the given key type."""
        kt = crypto_knowledge.KeyType(key_type)
        for bits in kt.sizes_to_test():
            # Test a non-exercisable key, as well as exercisable keys for
            # each compatible algorithm.
            # To do: test reading a key from storage with an incompatible
            # or unsupported algorithm.
            yield self.key_for_type_and_alg(kt, bits)
            compatible_algorithms = [alg for alg in all_algorithms
                                     if kt.can_do(alg)]
            for alg in compatible_algorithms:
                yield self.key_for_type_and_alg(kt, bits, alg)

    def all_keys_for_types(self) -> Iterator[StorageTestData]:
        """Generate test keys covering key types and their representations."""
        key_types = sorted(self.constructors.key_types)
        all_algorithms = [crypto_knowledge.Algorithm(alg)
                          for alg in self.constructors.generate_expressions(
                              sorted(self.constructors.algorithms)
                          )]
        for key_type in self.constructors.generate_expressions(key_types):
            yield from self.keys_for_type(key_type, all_algorithms)

    def keys_for_algorithm(self, alg: str) -> Iterator[StorageTestData]:
        """Generate test keys for the encoding of the specified algorithm."""
        # These test cases only validate the encoding of algorithms, not
        # whether the key read from storage is suitable for an operation.
        # `keys_for_types` generates read tests with an algorithm and a
        # compatible key.
        descr = crypto_knowledge.short_expression(alg, 1)
        usage = ['PSA_KEY_USAGE_EXPORT']
        key1 = StorageTestData(version=self.version,
                               id=1, lifetime=0x00000001,
                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                               usage=usage, alg=alg, alg2=0,
                               material=b'K',
                               description='alg: ' + descr)
        yield key1
        key2 = StorageTestData(version=self.version,
                               id=1, lifetime=0x00000001,
                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
                               usage=usage, alg=0, alg2=alg,
                               material=b'L',
                               description='alg2: ' + descr)
        yield key2

    def all_keys_for_algorithms(self) -> Iterator[StorageTestData]:
        """Generate test keys covering algorithm encodings."""
        algorithms = sorted(self.constructors.algorithms)
        for alg in self.constructors.generate_expressions(algorithms):
            yield from self.keys_for_algorithm(alg)

    def generate_all_keys(self) -> Iterator[StorageTestData]:
        """Generate all keys for the test cases."""
        yield from self.all_keys_for_lifetimes()
        yield from self.all_keys_for_usage_flags()
        yield from self.all_keys_for_types()
        yield from self.all_keys_for_algorithms()

    def all_test_cases(self) -> Iterator[test_case.TestCase]:
        """Generate all storage format test cases."""
        # First build a list of all keys, then construct all the corresponding
        # test cases. This allows all required information to be obtained in
        # one go, which is a significant performance gain as the information
        # includes numerical values obtained by compiling a C program.
        all_keys = list(self.generate_all_keys())
        for key in all_keys:
            if key.location_value() != 0:
                # Skip keys with a non-default location, because they
                # require a driver and we currently have no mechanism to
                # determine whether a driver is available.
                continue
            yield self.make_test_case(key)

class StorageFormatForward(StorageFormat):
    """Storage format stability test cases for forward compatibility."""

    def __init__(self, info: psa_information.Information, version: int) -> None:
        super().__init__(info, version, True)

class StorageFormatV0(StorageFormat):
    """Storage format stability test cases for version 0 compatibility."""

    def __init__(self, info: psa_information.Information) -> None:
        super().__init__(info, 0, False)

    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
        """Generate test keys covering usage flags."""
        yield from super().all_keys_for_usage_flags()
        yield from self.generate_keys_for_usage_flags(test_implicit_usage=False)

    def keys_for_implicit_usage(
            self,
            implyer_usage: str,
            alg: str,
            key_type: crypto_knowledge.KeyType
    ) -> StorageTestData:
        # pylint: disable=too-many-locals
        """Generate test keys for the specified implicit usage flag,
        algorithm and key type combination.
        """
        bits = key_type.sizes_to_test()[0]
        implicit_usage = StorageKey.IMPLICIT_USAGE_FLAGS[implyer_usage]
        usage_flags = ['PSA_KEY_USAGE_EXPORT']
        material_usage_flags = usage_flags + [implyer_usage]
        expected_usage_flags = material_usage_flags + [implicit_usage]
        alg2 = 0
        key_material = key_type.key_material(bits)
        usage_expression = crypto_knowledge.short_expression(implyer_usage, 1)
        alg_expression = crypto_knowledge.short_expression(alg, 1)
        key_type_expression = key_type.short_expression(1)
        description = 'implied by {}: {} {} {}-bit'.format(
            usage_expression, alg_expression, key_type_expression, bits)
        key = StorageTestData(version=self.version,
                              id=1, lifetime=0x00000001,
                              type=key_type.expression, bits=bits,
                              usage=material_usage_flags,
                              expected_usage=expected_usage_flags,
                              without_implicit_usage=True,
                              alg=alg, alg2=alg2,
                              material=key_material,
                              description=description)
        return key

    def gather_key_types_for_sign_alg(self) -> Dict[str, List[str]]:
        # pylint: disable=too-many-locals
        """Match possible key types for sign algorithms."""
        # To create a valid combination both the algorithms and key types
        # must be filtered. Pair them with keywords created from their names.
        incompatible_alg_keyword = frozenset(['RAW', 'ANY', 'PURE'])
        incompatible_key_type_keywords = frozenset(['MONTGOMERY'])
        keyword_translation = {
            'ECDSA': 'ECC',
            'ED[0-9]*.*' : 'EDWARDS'
        }
        exclusive_keywords = {
            'EDWARDS': 'ECC'
        }
        key_types = set(self.constructors.generate_expressions(self.constructors.key_types))
        algorithms = set(self.constructors.generate_expressions(self.constructors.sign_algorithms))
        alg_with_keys = {} #type: Dict[str, List[str]]
        translation_table = str.maketrans('(', '_', ')')
        for alg in algorithms:
            # Generate keywords from the name of the algorithm
            alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
            # Translate keywords for better matching with the key types
            for keyword in alg_keywords.copy():
                for pattern, replace in keyword_translation.items():
                    if re.match(pattern, keyword):
                        alg_keywords.remove(keyword)
                        alg_keywords.add(replace)
            # Filter out incompatible algorithms
            if not alg_keywords.isdisjoint(incompatible_alg_keyword):
                continue

            for key_type in key_types:
                # Generate keywords from the name of the key type
                key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])

                # Remove ambiguous keywords
                for keyword1, keyword2 in exclusive_keywords.items():
                    if keyword1 in key_type_keywords:
                        key_type_keywords.remove(keyword2)

                if key_type_keywords.isdisjoint(incompatible_key_type_keywords) and\
                   not key_type_keywords.isdisjoint(alg_keywords):
                    if alg in alg_with_keys:
                        alg_with_keys[alg].append(key_type)
                    else:
                        alg_with_keys[alg] = [key_type]
        return alg_with_keys

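    # Worked example (illustrative): for alg = PSA_ALG_ECDSA(PSA_ALG_SHA_256),
    # the keyword set is {'ECDSA'}, translated to {'ECC'}; a key type such as
    # PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_SECP_R1) yields keywords that
    # include 'ECC', the sets intersect, and the pair is recorded in
    # alg_with_keys.
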
    def all_keys_for_implicit_usage(self) -> Iterator[StorageTestData]:
        """Generate test keys for usage flag extensions."""
        # Generate a key type and algorithm pair for each extendable usage
        # flag to generate a valid key for exercising. The key is generated
        # without usage extension to check the extension compatibility.
        alg_with_keys = self.gather_key_types_for_sign_alg()

        for usage in sorted(StorageKey.IMPLICIT_USAGE_FLAGS, key=str):
            for alg in sorted(alg_with_keys):
                for key_type in sorted(alg_with_keys[alg]):
                    # The key types must be filtered to fit the specific usage flag.
                    kt = crypto_knowledge.KeyType(key_type)
                    if kt.is_public() and '_SIGN_' in usage:
                        # Can't sign with a public key
                        continue
                    yield self.keys_for_implicit_usage(usage, alg, kt)

    def generate_all_keys(self) -> Iterator[StorageTestData]:
        yield from super().generate_all_keys()
        yield from self.all_keys_for_implicit_usage()


class PSATestGenerator(test_data_generation.TestGenerator):
    """Test generator subclass including PSA targets and info."""
    # Note that targets whose names contain 'test_format' have their content
    # validated by `abi_check.py`.
    targets = {
        'test_suite_psa_crypto_generate_key.generated':
        lambda info: KeyGenerate(info).test_cases_for_key_generation(),
        'test_suite_psa_crypto_not_supported.generated':
        lambda info: KeyTypeNotSupported(info).test_cases_for_not_supported(),
        'test_suite_psa_crypto_low_hash.generated':
        lambda info: crypto_data_tests.HashPSALowLevel(info).all_test_cases(),
        'test_suite_psa_crypto_op_fail.generated':
        lambda info: OpFail(info).all_test_cases(),
        'test_suite_psa_crypto_storage_format.current':
        lambda info: StorageFormatForward(info, 0).all_test_cases(),
        'test_suite_psa_crypto_storage_format.v0':
        lambda info: StorageFormatV0(info).all_test_cases(),
    } #type: Dict[str, Callable[[psa_information.Information], Iterable[test_case.TestCase]]]

    def __init__(self, options):
        super().__init__(options)
        self.info = psa_information.Information()

    def generate_target(self, name: str, *target_args) -> None:
        super().generate_target(name, self.info)


if __name__ == '__main__':
    test_data_generation.main(sys.argv[1:], __doc__, PSATestGenerator)
87
externals/mbedtls/tests/scripts/generate_server9_bad_saltlen.py
vendored
Executable file
@@ -0,0 +1,87 @@
#!/usr/bin/env python3
"""Generate server9-bad-saltlen.crt

Generate a certificate signed with RSA-PSS, with an incorrect salt length.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import subprocess
import argparse
from asn1crypto import pem, x509, core #type: ignore #pylint: disable=import-error

OPENSSL_RSA_PSS_CERT_COMMAND = r'''
openssl x509 -req -CA {ca_name}.crt -CAkey {ca_name}.key -set_serial 24 {ca_password} \
{openssl_extfile} -days 3650 -outform DER -in {csr} \
-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{anounce_saltlen} \
-sigopt rsa_mgf1_md:sha256
'''
SIG_OPT = \
    r'-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{saltlen} -sigopt rsa_mgf1_md:sha256'
OPENSSL_RSA_PSS_DGST_COMMAND = r'''openssl dgst -sign {ca_name}.key {ca_password} \
-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{actual_saltlen} \
-sigopt rsa_mgf1_md:sha256'''


def auto_int(x):
    return int(x, 0)


def build_argparser(parser):
    """Build argument parser"""
    parser.description = __doc__
    parser.add_argument('--ca-name', type=str, required=True,
                        help='Basename of CA files')
    parser.add_argument('--ca-password', type=str,
                        required=True, help='CA key file password')
    parser.add_argument('--csr', type=str, required=True,
                        help='CSR file for generating certificate')
    parser.add_argument('--openssl-extfile', type=str,
                        required=True, help='X509 v3 extension config file')
    parser.add_argument('--anounce_saltlen', type=auto_int,
                        required=True, help='Announced salt length')
    parser.add_argument('--actual_saltlen', type=auto_int,
                        required=True, help='Actual salt length')
    parser.add_argument('--output', type=str, required=True)


def main():
    parser = argparse.ArgumentParser()
    build_argparser(parser)
    args = parser.parse_args()

    return generate(**vars(args))

def generate(**kwargs):
    """Generate a certificate file whose announced and actual salt lengths differ."""
    ca_password = kwargs.get('ca_password', '')
    if ca_password:
        kwargs['ca_password'] = r'-passin "pass:{ca_password}"'.format(
            **kwargs)
    else:
        kwargs['ca_password'] = ''
    extfile = kwargs.get('openssl_extfile', '')
    if extfile:
        kwargs['openssl_extfile'] = '-extfile {openssl_extfile}'.format(
            **kwargs)
    else:
        kwargs['openssl_extfile'] = ''

    cmd = OPENSSL_RSA_PSS_CERT_COMMAND.format(**kwargs)
    der_bytes = subprocess.check_output(cmd, shell=True)
    target_certificate = x509.Certificate.load(der_bytes)

    cmd = OPENSSL_RSA_PSS_DGST_COMMAND.format(**kwargs)
    #pylint: disable=unexpected-keyword-arg
    der_bytes = subprocess.check_output(cmd,
                                        input=target_certificate['tbs_certificate'].dump(),
                                        shell=True)

    with open(kwargs.get('output'), 'wb') as f:
        target_certificate['signature_value'] = core.OctetBitString(der_bytes)
        f.write(pem.armor('CERTIFICATE', target_certificate.dump()))

|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
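The trick above is worth spelling out: the x509 command embeds one salt length in the certificate's announced signature parameters, while the dgst command re-signs the TBS bytes with a different actual salt length, so the resulting certificate must fail verification. A minimal driver sketch follows; every path and the password below are illustrative assumptions, not values taken from this diff.

# Hypothetical invocation of generate_server9_bad_saltlen.py.
# All file paths and the CA password are assumptions for illustration.
import subprocess

subprocess.check_call([
    'tests/scripts/generate_server9_bad_saltlen.py',
    '--ca-name', 'data_files/test-ca',              # expands to test-ca.crt / test-ca.key
    '--ca-password', 'CHANGEME',                    # hypothetical CA key password
    '--csr', 'data_files/server9.req.sha256',       # hypothetical CSR path
    '--openssl-extfile', 'data_files/server9.ext',  # hypothetical X509 v3 config
    '--anounce_saltlen', '0x20',                    # salt length announced in the cert
    '--actual_saltlen', '0x1A',                     # salt length actually used to sign
    '--output', 'data_files/server9-bad-saltlen.crt',
])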
95
externals/mbedtls/tests/scripts/generate_test_cert_macros.py
vendored
Executable file
@@ -0,0 +1,95 @@
#!/usr/bin/env python3

"""
Generate `tests/src/test_certs.h` which includes certificates/keys/certificate lists for testing.
"""

#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later


import os
import sys
import argparse
import jinja2


class MacroDefineAction(argparse.Action):
    #pylint: disable=signature-differs, too-few-public-methods
    def __call__(self, parser, namespace, values, option_string):
        if not hasattr(namespace, 'values'):
            setattr(namespace, 'values', [])
        macro_name, filename = values
        if self.dest in ('string', 'binary') and not os.path.exists(filename):
            raise argparse.ArgumentError(
                None, '`{}`: Input file does not exist.'.format(filename))
        namespace.values.append((self.dest, macro_name, filename))


def macro_define_type(value):
    ret = value.split('=', 1)
    if len(ret) != 2:
        raise argparse.ArgumentTypeError(
            '`{}` is not MACRO=value format'.format(value))
    return ret


def build_argparser(parser):
    parser.description = __doc__
    parser.add_argument('--string', type=macro_define_type, action=MacroDefineAction,
                        metavar='MACRO_NAME=path/to/file', help='PEM to C string.')
    parser.add_argument('--binary', type=macro_define_type, action=MacroDefineAction,
                        metavar='MACRO_NAME=path/to/file',
                        help='DER to C array.')
    parser.add_argument('--password', type=macro_define_type, action=MacroDefineAction,
                        metavar='MACRO_NAME=password', help='Password to C string.')
    parser.add_argument('--output', type=str, required=True)


def main():
    parser = argparse.ArgumentParser()
    build_argparser(parser)
    args = parser.parse_args()
    return generate(**vars(args))


#pylint: disable=dangerous-default-value, unused-argument
def generate(values=[], output=None, **kwargs):
    """Generate C header file."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    template_loader = jinja2.FileSystemLoader(
        searchpath=os.path.join(this_dir, '..', 'data_files'))
    template_env = jinja2.Environment(
        loader=template_loader, lstrip_blocks=True, trim_blocks=True)

    def read_as_c_array(filename):
        with open(filename, 'rb') as f:
            data = f.read(12)
            while data:
                yield ', '.join(['{:#04x}'.format(b) for b in data])
                data = f.read(12)

    def read_lines(filename):
        with open(filename) as f:
            try:
                for line in f:
                    yield line.strip()
            except:
                print(filename)
                raise

    def put_to_column(value, position=0):
        return ' '*position + value

    template_env.filters['read_as_c_array'] = read_as_c_array
    template_env.filters['read_lines'] = read_lines
    template_env.filters['put_to_column'] = put_to_column

    template = template_env.get_template('test_certs.h.jinja2')

    with open(output, 'w') as f:
        f.write(template.render(macros=values))


if __name__ == '__main__':
    sys.exit(main())
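The read_as_c_array filter above drives the generated header's layout: it reads a binary file 12 bytes at a time and emits each chunk as a comma-separated run of hex literals, one template line per chunk. A standalone sketch of that formatting, using an in-memory byte string instead of a DER file and a made-up macro name:

# Self-contained sketch of the 12-bytes-per-line hex formatting used by
# read_as_c_array; 'test_cert_der' is an illustrative name only.
def as_c_array_lines(data, width=12):
    for i in range(0, len(data), width):
        yield ', '.join('{:#04x}'.format(b) for b in data[i:i + width])

der = bytes(range(30))  # stand-in for the contents of a .der file
body = ',\n    '.join(as_c_array_lines(der))
print('const unsigned char test_cert_der[] = {\n    ' + body + '\n};')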
1277
externals/mbedtls/tests/scripts/generate_test_code.py
vendored
Executable file
File diff suppressed because it is too large
657
externals/mbedtls/tests/scripts/generate_tls13_compat_tests.py
vendored
Executable file
@@ -0,0 +1,657 @@
#!/usr/bin/env python3

# generate_tls13_compat_tests.py
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""
Generate TLS 1.3 compatibility test cases.
"""

import sys
import os
import argparse
import itertools
from collections import namedtuple

# define certificates configuration entry
Certificate = namedtuple("Certificate", ['cafile', 'certfile', 'keyfile'])
# define the certificate parameters for signature algorithms
CERTIFICATES = {
    'ecdsa_secp256r1_sha256': Certificate('data_files/test-ca2.crt',
                                          'data_files/ecdsa_secp256r1.crt',
                                          'data_files/ecdsa_secp256r1.key'),
    'ecdsa_secp384r1_sha384': Certificate('data_files/test-ca2.crt',
                                          'data_files/ecdsa_secp384r1.crt',
                                          'data_files/ecdsa_secp384r1.key'),
    'ecdsa_secp521r1_sha512': Certificate('data_files/test-ca2.crt',
                                          'data_files/ecdsa_secp521r1.crt',
                                          'data_files/ecdsa_secp521r1.key'),
    'rsa_pss_rsae_sha256': Certificate('data_files/test-ca_cat12.crt',
                                       'data_files/server2-sha256.crt',
                                       'data_files/server2.key')
}

CIPHER_SUITE_IANA_VALUE = {
    "TLS_AES_128_GCM_SHA256": 0x1301,
    "TLS_AES_256_GCM_SHA384": 0x1302,
    "TLS_CHACHA20_POLY1305_SHA256": 0x1303,
    "TLS_AES_128_CCM_SHA256": 0x1304,
    "TLS_AES_128_CCM_8_SHA256": 0x1305
}

SIG_ALG_IANA_VALUE = {
    "ecdsa_secp256r1_sha256": 0x0403,
    "ecdsa_secp384r1_sha384": 0x0503,
    "ecdsa_secp521r1_sha512": 0x0603,
    'rsa_pss_rsae_sha256': 0x0804,
}

NAMED_GROUP_IANA_VALUE = {
    'secp256r1': 0x17,
    'secp384r1': 0x18,
    'secp521r1': 0x19,
    'x25519': 0x1d,
    'x448': 0x1e,
    # Only one finite field group to keep testing time within reasonable bounds.
    'ffdhe2048': 0x100,
}


class TLSProgram:
    """
    Base class for generating server/client commands.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, ciphersuite=None, signature_algorithm=None, named_group=None,
                 cert_sig_alg=None, compat_mode=True):
        self._ciphers = []
        self._sig_algs = []
        self._named_groups = []
        self._cert_sig_algs = []
        if ciphersuite:
            self.add_ciphersuites(ciphersuite)
        if named_group:
            self.add_named_groups(named_group)
        if signature_algorithm:
            self.add_signature_algorithms(signature_algorithm)
        if cert_sig_alg:
            self.add_cert_signature_algorithms(cert_sig_alg)
        self._compat_mode = compat_mode

    # add_ciphersuites should not be overridden by subclasses
    def add_ciphersuites(self, *ciphersuites):
        self._ciphers.extend(
            [cipher for cipher in ciphersuites if cipher not in self._ciphers])

    # add_signature_algorithms should not be overridden by subclasses
    def add_signature_algorithms(self, *signature_algorithms):
        self._sig_algs.extend(
            [sig_alg for sig_alg in signature_algorithms if sig_alg not in self._sig_algs])

    # add_named_groups should not be overridden by subclasses
    def add_named_groups(self, *named_groups):
        self._named_groups.extend(
            [named_group for named_group in named_groups if named_group not in self._named_groups])

    # add_cert_signature_algorithms should not be overridden by subclasses
    def add_cert_signature_algorithms(self, *signature_algorithms):
        self._cert_sig_algs.extend(
            [sig_alg for sig_alg in signature_algorithms if sig_alg not in self._cert_sig_algs])

    # pylint: disable=no-self-use
    def pre_checks(self):
        return []

    # pylint: disable=no-self-use
    def cmd(self):
        if not self._cert_sig_algs:
            self._cert_sig_algs = list(CERTIFICATES.keys())
        return self.pre_cmd()

    # pylint: disable=no-self-use
    def post_checks(self):
        return []

    # pylint: disable=no-self-use
    def pre_cmd(self):
        return ['false']

    # pylint: disable=unused-argument,no-self-use
    def hrr_post_checks(self, named_group):
        return []


class OpenSSLBase(TLSProgram):
    """
    Generate base test commands for OpenSSL.
    """

    NAMED_GROUP = {
        'secp256r1': 'P-256',
        'secp384r1': 'P-384',
        'secp521r1': 'P-521',
        'x25519': 'X25519',
        'x448': 'X448',
        'ffdhe2048': 'ffdhe2048',
    }

    def cmd(self):
        ret = super().cmd()

        if self._ciphers:
            ciphersuites = ':'.join(self._ciphers)
            ret += ["-ciphersuites {ciphersuites}".format(ciphersuites=ciphersuites)]

        if self._sig_algs:
            signature_algorithms = set(self._sig_algs + self._cert_sig_algs)
            signature_algorithms = ':'.join(signature_algorithms)
            ret += ["-sigalgs {signature_algorithms}".format(
                signature_algorithms=signature_algorithms)]

        if self._named_groups:
            named_groups = ':'.join(
                map(lambda named_group: self.NAMED_GROUP[named_group], self._named_groups))
            ret += ["-groups {named_groups}".format(named_groups=named_groups)]

        ret += ['-msg -tls1_3']
        if not self._compat_mode:
            ret += ['-no_middlebox']

        return ret

    def pre_checks(self):
        ret = ["requires_openssl_tls1_3"]

        # ffdh groups require at least openssl 3.0
        ffdh_groups = ['ffdhe2048']

        if any(x in ffdh_groups for x in self._named_groups):
            ret = ["requires_openssl_tls1_3_with_ffdh"]

        return ret


class OpenSSLServ(OpenSSLBase):
    """
    Generate test commands for OpenSSL server.
    """

    def cmd(self):
        ret = super().cmd()
        ret += ['-num_tickets 0 -no_resume_ephemeral -no_cache']
        return ret

    def post_checks(self):
        return ['-c "HTTP/1.0 200 ok"']

    def pre_cmd(self):
        ret = ['$O_NEXT_SRV_NO_CERT']
        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
            ret += ['-cert {cert} -key {key}'.format(cert=cert, key=key)]
        return ret


class OpenSSLCli(OpenSSLBase):
    """
    Generate test commands for OpenSSL client.
    """

    def pre_cmd(self):
        return ['$O_NEXT_CLI_NO_CERT',
                '-CAfile {cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]


class GnuTLSBase(TLSProgram):
    """
    Generate base test commands for GnuTLS.
    """

    CIPHER_SUITE = {
        'TLS_AES_256_GCM_SHA384': ['AES-256-GCM', 'SHA384', 'AEAD'],
        'TLS_AES_128_GCM_SHA256': ['AES-128-GCM', 'SHA256', 'AEAD'],
        'TLS_CHACHA20_POLY1305_SHA256': ['CHACHA20-POLY1305', 'SHA256', 'AEAD'],
        'TLS_AES_128_CCM_SHA256': ['AES-128-CCM', 'SHA256', 'AEAD'],
        'TLS_AES_128_CCM_8_SHA256': ['AES-128-CCM-8', 'SHA256', 'AEAD']}

    SIGNATURE_ALGORITHM = {
        'ecdsa_secp256r1_sha256': ['SIGN-ECDSA-SECP256R1-SHA256'],
        'ecdsa_secp521r1_sha512': ['SIGN-ECDSA-SECP521R1-SHA512'],
        'ecdsa_secp384r1_sha384': ['SIGN-ECDSA-SECP384R1-SHA384'],
        'rsa_pss_rsae_sha256': ['SIGN-RSA-PSS-RSAE-SHA256']}

    NAMED_GROUP = {
        'secp256r1': ['GROUP-SECP256R1'],
        'secp384r1': ['GROUP-SECP384R1'],
        'secp521r1': ['GROUP-SECP521R1'],
        'x25519': ['GROUP-X25519'],
        'x448': ['GROUP-X448'],
        'ffdhe2048': ['GROUP-FFDHE2048'],
    }

    def pre_checks(self):
        return ["requires_gnutls_tls1_3",
                "requires_gnutls_next_no_ticket",
                "requires_gnutls_next_disable_tls13_compat"]

    def cmd(self):
        ret = super().cmd()

        priority_string_list = []

        def update_priority_string_list(items, map_table):
            for item in items:
                for i in map_table[item]:
                    if i not in priority_string_list:
                        yield i

        if self._ciphers:
            priority_string_list.extend(update_priority_string_list(
                self._ciphers, self.CIPHER_SUITE))
        else:
            priority_string_list.extend(['CIPHER-ALL', 'MAC-ALL'])

        if self._sig_algs:
            signature_algorithms = set(self._sig_algs + self._cert_sig_algs)
            priority_string_list.extend(update_priority_string_list(
                signature_algorithms, self.SIGNATURE_ALGORITHM))
        else:
            priority_string_list.append('SIGN-ALL')

        if self._named_groups:
            priority_string_list.extend(update_priority_string_list(
                self._named_groups, self.NAMED_GROUP))
        else:
            priority_string_list.append('GROUP-ALL')

        priority_string_list = ['NONE'] + \
            priority_string_list + ['VERS-TLS1.3']

        priority_string = ':+'.join(priority_string_list)
        priority_string += ':%NO_TICKETS'

        if not self._compat_mode:
            priority_string += ':%DISABLE_TLS13_COMPAT_MODE'

        ret += ['--priority={priority_string}'.format(
            priority_string=priority_string)]
        return ret


class GnuTLSServ(GnuTLSBase):
    """
    Generate test commands for GnuTLS server.
    """

    def pre_cmd(self):
        ret = ['$G_NEXT_SRV_NO_CERT', '--http', '--disable-client-cert', '--debug=4']

        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
            ret += ['--x509certfile {cert} --x509keyfile {key}'.format(
                cert=cert, key=key)]
        return ret

    def post_checks(self):
        return ['-c "HTTP/1.0 200 OK"']


class GnuTLSCli(GnuTLSBase):
    """
    Generate test commands for GnuTLS client.
    """

    def pre_cmd(self):
        return ['$G_NEXT_CLI_NO_CERT', '--debug=4', '--single-key-share',
                '--x509cafile {cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]


class MbedTLSBase(TLSProgram):
    """
    Generate base test commands for mbedTLS.
    """

    CIPHER_SUITE = {
        'TLS_AES_256_GCM_SHA384': 'TLS1-3-AES-256-GCM-SHA384',
        'TLS_AES_128_GCM_SHA256': 'TLS1-3-AES-128-GCM-SHA256',
        'TLS_CHACHA20_POLY1305_SHA256': 'TLS1-3-CHACHA20-POLY1305-SHA256',
        'TLS_AES_128_CCM_SHA256': 'TLS1-3-AES-128-CCM-SHA256',
        'TLS_AES_128_CCM_8_SHA256': 'TLS1-3-AES-128-CCM-8-SHA256'}

    def cmd(self):
        ret = super().cmd()
        ret += ['debug_level=4']

        if self._ciphers:
            ciphers = ','.join(
                map(lambda cipher: self.CIPHER_SUITE[cipher], self._ciphers))
            ret += ["force_ciphersuite={ciphers}".format(ciphers=ciphers)]

        if self._sig_algs + self._cert_sig_algs:
            ret += ['sig_algs={sig_algs}'.format(
                sig_algs=','.join(set(self._sig_algs + self._cert_sig_algs)))]

        if self._named_groups:
            named_groups = ','.join(self._named_groups)
            ret += ["groups={named_groups}".format(named_groups=named_groups)]
        return ret

    #pylint: disable=missing-function-docstring
    def add_ffdh_group_requirements(self, requirement_list):
        if 'ffdhe2048' in self._named_groups:
            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_2048')
        if 'ffdhe3072' in self._named_groups:
            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_3072')
        if 'ffdhe4096' in self._named_groups:
            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_4096')
        if 'ffdhe6144' in self._named_groups:
            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_6144')
        if 'ffdhe8192' in self._named_groups:
            requirement_list.append('requires_config_enabled PSA_WANT_DH_RFC7919_8192')

    def pre_checks(self):
        ret = ['requires_config_enabled MBEDTLS_DEBUG_C',
               'requires_config_enabled MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED']

        if self._compat_mode:
            ret += ['requires_config_enabled MBEDTLS_SSL_TLS1_3_COMPATIBILITY_MODE']

        if 'rsa_pss_rsae_sha256' in self._sig_algs + self._cert_sig_algs:
            ret.append(
                'requires_config_enabled MBEDTLS_X509_RSASSA_PSS_SUPPORT')

        ec_groups = ['secp256r1', 'secp384r1', 'secp521r1', 'x25519', 'x448']
        ffdh_groups = ['ffdhe2048', 'ffdhe3072', 'ffdhe4096', 'ffdhe6144', 'ffdhe8192']

        if any(x in ec_groups for x in self._named_groups):
            ret.append('requires_config_enabled PSA_WANT_ALG_ECDH')

        if any(x in ffdh_groups for x in self._named_groups):
            ret.append('requires_config_enabled PSA_WANT_ALG_FFDH')
            self.add_ffdh_group_requirements(ret)

        return ret


class MbedTLSServ(MbedTLSBase):
    """
    Generate test commands for mbedTLS server.
    """

    def cmd(self):
        ret = super().cmd()
        ret += ['tls13_kex_modes=ephemeral cookies=0 tickets=0']
        return ret

    def pre_checks(self):
        return ['requires_config_enabled MBEDTLS_SSL_SRV_C'] + super().pre_checks()

    def post_checks(self):
        check_strings = ["Protocol is TLSv1.3"]
        if self._ciphers:
            check_strings.append(
                "server hello, chosen ciphersuite: {} ( id={:04d} )".format(
                    self.CIPHER_SUITE[self._ciphers[0]],
                    CIPHER_SUITE_IANA_VALUE[self._ciphers[0]]))
        if self._sig_algs:
            check_strings.append(
                "received signature algorithm: 0x{:x}".format(
                    SIG_ALG_IANA_VALUE[self._sig_algs[0]]))

        for named_group in self._named_groups:
            check_strings += ['got named group: {named_group}({iana_value:04x})'.format(
                named_group=named_group,
                iana_value=NAMED_GROUP_IANA_VALUE[named_group])]

        check_strings.append("Certificate verification was skipped")
        return ['-s "{}"'.format(i) for i in check_strings]

    def pre_cmd(self):
        ret = ['$P_SRV']
        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
            ret += ['crt_file={cert} key_file={key}'.format(cert=cert, key=key)]
        return ret

    def hrr_post_checks(self, named_group):
        return ['-s "HRR selected_group: {:s}"'.format(named_group)]


class MbedTLSCli(MbedTLSBase):
    """
    Generate test commands for mbedTLS client.
    """

    def pre_cmd(self):
        return ['$P_CLI',
                'ca_file={cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]

    def pre_checks(self):
        return ['requires_config_enabled MBEDTLS_SSL_CLI_C'] + super().pre_checks()

    def hrr_post_checks(self, named_group):
        ret = ['-c "received HelloRetryRequest message"']
        ret += ['-c "selected_group ( {:d} )"'.format(NAMED_GROUP_IANA_VALUE[named_group])]
        return ret

    def post_checks(self):
        check_strings = ["Protocol is TLSv1.3"]
        if self._ciphers:
            check_strings.append(
                "server hello, chosen ciphersuite: ( {:04x} ) - {}".format(
                    CIPHER_SUITE_IANA_VALUE[self._ciphers[0]],
                    self.CIPHER_SUITE[self._ciphers[0]]))
        if self._sig_algs:
            check_strings.append(
                "Certificate Verify: Signature algorithm ( {:04x} )".format(
                    SIG_ALG_IANA_VALUE[self._sig_algs[0]]))

        for named_group in self._named_groups:
            check_strings += ['NamedGroup: {named_group} ( {iana_value:x} )'.format(
                named_group=named_group,
                iana_value=NAMED_GROUP_IANA_VALUE[named_group])]

        check_strings.append("Verifying peer X.509 certificate... ok")
        return ['-c "{}"'.format(i) for i in check_strings]


SERVER_CLASSES = {'OpenSSL': OpenSSLServ, 'GnuTLS': GnuTLSServ, 'mbedTLS': MbedTLSServ}
CLIENT_CLASSES = {'OpenSSL': OpenSSLCli, 'GnuTLS': GnuTLSCli, 'mbedTLS': MbedTLSCli}


def generate_compat_test(client=None, server=None, cipher=None, named_group=None, sig_alg=None):
    """
    Generate a test case in `ssl-opt.sh` format.
    """
    name = 'TLS 1.3 {client[0]}->{server[0]}: {cipher},{named_group},{sig_alg}'.format(
        client=client, server=server, cipher=cipher[4:], sig_alg=sig_alg, named_group=named_group)

    server_object = SERVER_CLASSES[server](ciphersuite=cipher,
                                           named_group=named_group,
                                           signature_algorithm=sig_alg,
                                           cert_sig_alg=sig_alg)
    client_object = CLIENT_CLASSES[client](ciphersuite=cipher,
                                           named_group=named_group,
                                           signature_algorithm=sig_alg,
                                           cert_sig_alg=sig_alg)

    cmd = ['run_test "{}"'.format(name),
           '"{}"'.format(' '.join(server_object.cmd())),
           '"{}"'.format(' '.join(client_object.cmd())),
           '0']
    cmd += server_object.post_checks()
    cmd += client_object.post_checks()
    cmd += ['-C "received HelloRetryRequest message"']
    prefix = ' \\\n' + (' '*9)
    cmd = prefix.join(cmd)
    return '\n'.join(server_object.pre_checks() + client_object.pre_checks() + [cmd])


def generate_hrr_compat_test(client=None, server=None,
                             client_named_group=None, server_named_group=None,
                             cert_sig_alg=None):
    """
    Generate a Hello Retry Request test case in `ssl-opt.sh` format.
    """
    name = 'TLS 1.3 {client[0]}->{server[0]}: HRR {c_named_group} -> {s_named_group}'.format(
        client=client, server=server, c_named_group=client_named_group,
        s_named_group=server_named_group)
    server_object = SERVER_CLASSES[server](named_group=server_named_group,
                                           cert_sig_alg=cert_sig_alg)

    client_object = CLIENT_CLASSES[client](named_group=client_named_group,
                                           cert_sig_alg=cert_sig_alg)
    client_object.add_named_groups(server_named_group)

    cmd = ['run_test "{}"'.format(name),
           '"{}"'.format(' '.join(server_object.cmd())),
           '"{}"'.format(' '.join(client_object.cmd())),
           '0']
    cmd += server_object.post_checks()
    cmd += client_object.post_checks()
    cmd += server_object.hrr_post_checks(server_named_group)
    cmd += client_object.hrr_post_checks(server_named_group)
    prefix = ' \\\n' + (' '*9)
    cmd = prefix.join(cmd)
    return '\n'.join(server_object.pre_checks() +
                     client_object.pre_checks() +
                     [cmd])


SSL_OUTPUT_HEADER = '''#!/bin/sh

# {filename}
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# List TLS1.3 compat test cases. They are generated by
# `{cmd}`.
#
# PLEASE DO NOT EDIT THIS FILE. IF NEEDED, PLEASE MODIFY `generate_tls13_compat_tests.py`
# AND REGENERATE THIS FILE.
#
'''


def main():
    """
    Main function of this program.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('-o', '--output', nargs='?',
                        default=None, help='Output file path if `-a` was set')

    parser.add_argument('-a', '--generate-all-tls13-compat-tests', action='store_true',
                        default=False, help='Generate all available tls13 compat tests')

    parser.add_argument('--list-ciphers', action='store_true',
                        default=False, help='List supported ciphersuites')

    parser.add_argument('--list-sig-algs', action='store_true',
                        default=False, help='List supported signature algorithms')

    parser.add_argument('--list-named-groups', action='store_true',
                        default=False, help='List supported named groups')

    parser.add_argument('--list-servers', action='store_true',
                        default=False, help='List supported TLS servers')

    parser.add_argument('--list-clients', action='store_true',
                        default=False, help='List supported TLS clients')

    parser.add_argument('server', choices=SERVER_CLASSES.keys(), nargs='?',
                        default=list(SERVER_CLASSES.keys())[0],
                        help='Choose TLS server program for test')
    parser.add_argument('client', choices=CLIENT_CLASSES.keys(), nargs='?',
                        default=list(CLIENT_CLASSES.keys())[0],
                        help='Choose TLS client program for test')
    parser.add_argument('cipher', choices=CIPHER_SUITE_IANA_VALUE.keys(), nargs='?',
                        default=list(CIPHER_SUITE_IANA_VALUE.keys())[0],
                        help='Choose cipher suite for test')
    parser.add_argument('sig_alg', choices=SIG_ALG_IANA_VALUE.keys(), nargs='?',
                        default=list(SIG_ALG_IANA_VALUE.keys())[0],
                        help='Choose signature algorithm for test')
    parser.add_argument('named_group', choices=NAMED_GROUP_IANA_VALUE.keys(), nargs='?',
                        default=list(NAMED_GROUP_IANA_VALUE.keys())[0],
                        help='Choose named group for test')

    args = parser.parse_args()

    def get_all_test_cases():
        # Generate normal compat test cases
        for client, server, cipher, named_group, sig_alg in \
            itertools.product(CLIENT_CLASSES.keys(),
                              SERVER_CLASSES.keys(),
                              CIPHER_SUITE_IANA_VALUE.keys(),
                              NAMED_GROUP_IANA_VALUE.keys(),
                              SIG_ALG_IANA_VALUE.keys()):
            if server == 'mbedTLS' or client == 'mbedTLS':
                yield generate_compat_test(client=client, server=server,
                                           cipher=cipher, named_group=named_group,
                                           sig_alg=sig_alg)

        # Generate Hello Retry Request compat test cases
        for client, server, client_named_group, server_named_group in \
            itertools.product(CLIENT_CLASSES.keys(),
                              SERVER_CLASSES.keys(),
                              NAMED_GROUP_IANA_VALUE.keys(),
                              NAMED_GROUP_IANA_VALUE.keys()):

            if (client == 'mbedTLS' or server == 'mbedTLS') and \
               client_named_group != server_named_group:
                yield generate_hrr_compat_test(client=client, server=server,
                                               client_named_group=client_named_group,
                                               server_named_group=server_named_group,
                                               cert_sig_alg="ecdsa_secp256r1_sha256")

    if args.generate_all_tls13_compat_tests:
        if args.output:
            with open(args.output, 'w', encoding="utf-8") as f:
                f.write(SSL_OUTPUT_HEADER.format(
                    filename=os.path.basename(args.output), cmd=' '.join(sys.argv)))
                f.write('\n\n'.join(get_all_test_cases()))
                f.write('\n')
        else:
            print('\n\n'.join(get_all_test_cases()))
        return 0

    if args.list_ciphers or args.list_sig_algs or args.list_named_groups \
       or args.list_servers or args.list_clients:
        if args.list_ciphers:
            print(*CIPHER_SUITE_IANA_VALUE.keys())
        if args.list_sig_algs:
            print(*SIG_ALG_IANA_VALUE.keys())
        if args.list_named_groups:
            print(*NAMED_GROUP_IANA_VALUE.keys())
        if args.list_servers:
            print(*SERVER_CLASSES.keys())
        if args.list_clients:
            print(*CLIENT_CLASSES.keys())
        return 0

    print(generate_compat_test(server=args.server, client=args.client, sig_alg=args.sig_alg,
                               cipher=args.cipher, named_group=args.named_group))
    return 0


if __name__ == "__main__":
    sys.exit(main())
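One detail of the generated names that is easy to miss: `{client[0]}` in the format string indexes the first character of the program name, so 'OpenSSL', 'GnuTLS' and 'mbedTLS' render as 'O', 'G' and 'm'. A runnable sketch of the naming scheme, with illustrative parameter values:

# Sketch of the test-case naming used by generate_compat_test above.
client, server = 'OpenSSL', 'mbedTLS'
cipher, named_group, sig_alg = 'TLS_AES_128_GCM_SHA256', 'x25519', 'ecdsa_secp256r1_sha256'
name = 'TLS 1.3 {client[0]}->{server[0]}: {cipher},{named_group},{sig_alg}'.format(
    client=client, server=server, cipher=cipher[4:],  # cipher[4:] strips the 'TLS_' prefix
    named_group=named_group, sig_alg=sig_alg)
print(name)  # TLS 1.3 O->m: AES_128_GCM_SHA256,x25519,ecdsa_secp256r1_sha256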
54
externals/mbedtls/tests/scripts/list-identifiers.sh
vendored
Executable file
@@ -0,0 +1,54 @@
#!/bin/bash
#
# Create a file named identifiers containing identifiers from internal header
# files, based on the --internal flag.
# Outputs the line count of the file to stdout.
# A very thin wrapper around list_internal_identifiers.py for backwards
# compatibility.
# Must be run from Mbed TLS root.
#
# Usage: list-identifiers.sh [ -i | --internal ]
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

set -eu

if [ -d include/mbedtls ]; then :; else
    echo "$0: Must be run from Mbed TLS root" >&2
    exit 1
fi

INTERNAL=""

until [ -z "${1-}" ]
do
    case "$1" in
        -i|--internal)
            INTERNAL="1"
            ;;
        *)
            # print error
            echo "Unknown argument: '$1'"
            exit 1
            ;;
    esac
    shift
done

if [ $INTERNAL ]
then
    tests/scripts/list_internal_identifiers.py
    wc -l identifiers
else
    cat <<EOF
Sorry, this script has to be called with --internal.

This script exists solely for backwards compatibility with the previous
iteration of list-identifiers.sh, of which only the --internal option remains in
use. It is a thin wrapper around list_internal_identifiers.py.

check-names.sh, which used to depend on this script, has been replaced with
check_names.py and is now self-complete.
EOF
fi
45
externals/mbedtls/tests/scripts/list_internal_identifiers.py
vendored
Executable file
@@ -0,0 +1,45 @@
#!/usr/bin/env python3
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""
This script generates a file called identifiers that contains all Mbed TLS
identifiers found in internal headers. This is the equivalent of what was
previously `list-identifiers.sh --internal`, and is useful for generating an
exclusion file list for ABI/API checking, since we do not promise compatibility
for them.

It uses the CodeParser class from check_names.py to perform the parsing.

The script returns 0 on success, 1 if there is a script error.
Must be run from Mbed TLS root.
"""

import argparse
import logging
from check_names import CodeParser


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=(
            "This script writes a list of parsed identifiers in internal "
            "headers to \"identifiers\". This is useful for generating a list "
            "of names to exclude from API/ABI compatibility checking. "))

    parser.parse_args()

    name_check = CodeParser(logging.getLogger())
    result = name_check.parse_identifiers([
        "include/mbedtls/*_internal.h",
        "library/*.h"
    ])[0]
    result.sort(key=lambda x: x.name)

    identifiers = ["{}\n".format(match.name) for match in result]
    with open("identifiers", "w", encoding="utf-8") as f:
        f.writelines(identifiers)


if __name__ == "__main__":
    main()
128
externals/mbedtls/tests/scripts/psa_collect_statuses.py
vendored
Executable file
@@ -0,0 +1,128 @@
#!/usr/bin/env python3
"""Describe the test coverage of PSA functions in terms of return statuses.

1. Build Mbed TLS with -DRECORD_PSA_STATUS_COVERAGE_LOG
2. Run psa_collect_statuses.py

The output is a series of lines of the form "psa_foo PSA_ERROR_XXX". Each
function/status combination appears only once.

This script must be run from the top of an Mbed TLS source tree.
The build command is "make -DRECORD_PSA_STATUS_COVERAGE_LOG", which is
only supported with make (as opposed to CMake or other build methods).
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import argparse
import os
import subprocess
import sys

DEFAULT_STATUS_LOG_FILE = 'tests/statuses.log'
DEFAULT_PSA_CONSTANT_NAMES = 'programs/psa/psa_constant_names'


class Statuses:
    """Information about observed return statuses of API functions."""

    def __init__(self):
        self.functions = {}
        self.codes = set()
        self.status_names = {}

    def collect_log(self, log_file_name):
        """Read logs from RECORD_PSA_STATUS_COVERAGE_LOG.

        Read logs produced by running Mbed TLS test suites built with
        -DRECORD_PSA_STATUS_COVERAGE_LOG.
        """
        with open(log_file_name) as log:
            for line in log:
                value, function, tail = line.split(':', 2)
                if function not in self.functions:
                    self.functions[function] = {}
                fdata = self.functions[function]
                if value not in self.functions[function]:
                    fdata[value] = []
                fdata[value].append(tail)
                self.codes.add(int(value))

    def get_constant_names(self, psa_constant_names):
        """Run psa_constant_names to obtain names for observed numerical values."""
        values = [str(value) for value in self.codes]
        cmd = [psa_constant_names, 'status'] + values
        output = subprocess.check_output(cmd).decode('ascii')
        for value, name in zip(values, output.rstrip().split('\n')):
            self.status_names[value] = name

    def report(self):
        """Report observed return values for each function.

        The report is a series of lines of the form "psa_foo PSA_ERROR_XXX".
        """
        for function in sorted(self.functions.keys()):
            fdata = self.functions[function]
            names = [self.status_names[value] for value in fdata.keys()]
            for name in sorted(names):
                sys.stdout.write('{} {}\n'.format(function, name))


def collect_status_logs(options):
    """Build and run unit tests and report observed function return statuses.

    Build Mbed TLS with -DRECORD_PSA_STATUS_COVERAGE_LOG, run the
    test suites and display information about observed return statuses.
    """
    rebuilt = False
    if not options.use_existing_log and os.path.exists(options.log_file):
        os.remove(options.log_file)
    if not os.path.exists(options.log_file):
        if options.clean_before:
            subprocess.check_call(['make', 'clean'],
                                  cwd='tests',
                                  stdout=sys.stderr)
        with open(os.devnull, 'w') as devnull:
            make_q_ret = subprocess.call(['make', '-q', 'lib', 'tests'],
                                         stdout=devnull, stderr=devnull)
        if make_q_ret != 0:
            subprocess.check_call(['make', 'RECORD_PSA_STATUS_COVERAGE_LOG=1'],
                                  stdout=sys.stderr)
            rebuilt = True
        subprocess.check_call(['make', 'test'],
                              stdout=sys.stderr)
    data = Statuses()
    data.collect_log(options.log_file)
    data.get_constant_names(options.psa_constant_names)
    if rebuilt and options.clean_after:
        subprocess.check_call(['make', 'clean'],
                              cwd='tests',
                              stdout=sys.stderr)
    return data


def main():
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--clean-after',
                        action='store_true',
                        help='Run "make clean" after rebuilding')
    parser.add_argument('--clean-before',
                        action='store_true',
                        help='Run "make clean" before regenerating the log file')
    parser.add_argument('--log-file', metavar='FILE',
                        default=DEFAULT_STATUS_LOG_FILE,
                        help='Log file location (default: {})'.format(
                            DEFAULT_STATUS_LOG_FILE
                        ))
    parser.add_argument('--psa-constant-names', metavar='PROGRAM',
                        default=DEFAULT_PSA_CONSTANT_NAMES,
                        help='Path to psa_constant_names (default: {})'.format(
                            DEFAULT_PSA_CONSTANT_NAMES
                        ))
    parser.add_argument('--use-existing-log', '-e',
                        action='store_true',
                        help='Don\'t regenerate the log file if it exists')
    options = parser.parse_args()
    data = collect_status_logs(options)
    data.report()


if __name__ == '__main__':
    main()
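Note how collect_log tolerates colons inside the log tail: split(':', 2) splits only on the first two colons, so a tail such as a "file:line" location survives intact. A minimal sketch of that parsing, with made-up sample lines:

# Minimal sketch of the statuses.log parsing done by Statuses.collect_log;
# the sample lines below are invented for illustration.
sample = [
    '0:psa_crypto_init:tests/suites/test_suite_psa_crypto.function:123\n',
    '-134:psa_cipher_encrypt:tests/suites/test_suite_psa_crypto.function:456\n',
]
functions = {}
codes = set()
for line in sample:
    value, function, tail = line.split(':', 2)  # tail keeps any later colons
    functions.setdefault(function, {}).setdefault(value, []).append(tail)
    codes.add(int(value))
print(sorted(functions), sorted(codes))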
47
externals/mbedtls/tests/scripts/recursion.pl
vendored
Executable file
@@ -0,0 +1,47 @@
#!/usr/bin/env perl

# Find functions making recursive calls to themselves.
# (Multiple recursion, where a() calls b() which calls a(), is not covered.)
#
# When the recursion depth might depend on data controlled by the attacker in
# an unbounded way, those functions should use iteration instead.
#
# Typical usage: scripts/recursion.pl library/*.c
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use warnings;
use strict;

use utf8;
use open qw(:std utf8);

# exclude functions that are ok:
# - mpi_write_hlp: bounded by size of mbedtls_mpi, a compile-time constant
# - x509_crt_verify_child: bounded by MBEDTLS_X509_MAX_INTERMEDIATE_CA
my $known_ok = qr/mpi_write_hlp|x509_crt_verify_child/;

my $cur_name;
my $inside;
my @funcs;

die "Usage: $0 file.c [...]\n" unless @ARGV;

while (<>)
{
    if( /^[^\/#{}\s]/ && ! /\[.*]/ ) {
        chomp( $cur_name = $_ ) unless $inside;
    } elsif( /^{/ && $cur_name ) {
        $inside = 1;
        $cur_name =~ s/.* ([^ ]*)\(.*/$1/;
    } elsif( /^}/ && $inside ) {
        undef $inside;
        undef $cur_name;
    } elsif( $inside && /\b\Q$cur_name\E\([^)]/ ) {
        push @funcs, $cur_name unless /$known_ok/;
    }
}

print "$_\n" for @funcs;
exit @funcs;
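For readers less fluent in Perl, the script is a small line-based state machine: a non-indented line records a candidate declaration, an opening brace in column 0 extracts the function name and marks us inside a body, and any call to that same name inside the body is flagged. A rough Python transliteration of the same heuristic, a sketch rather than a drop-in replacement:

# Rough Python equivalent of the recursion.pl heuristic.
import re

def find_self_recursion(lines, known_ok=('mpi_write_hlp', 'x509_crt_verify_child')):
    cur_name, inside, funcs = None, False, []
    for line in lines:
        if re.match(r'[^/#{}\s]', line) and not re.search(r'\[.*]', line):
            if not inside:
                cur_name = line.rstrip('\n')   # candidate declaration line
        elif line.startswith('{') and cur_name:
            inside = True
            cur_name = re.sub(r'.* ([^ ]*)\(.*', r'\1', cur_name)  # keep the name
        elif line.startswith('}') and inside:
            cur_name, inside = None, False
        elif inside and cur_name and re.search(re.escape(cur_name) + r'\([^)]', line):
            if cur_name not in known_ok:
                funcs.append(cur_name)
    return funcs

demo = ['int fact(int n)\n', '{\n', '    return n < 2 ? 1 : n * fact(n - 1);\n', '}\n']
print(find_self_recursion(demo))  # ['fact']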
89
externals/mbedtls/tests/scripts/run-metatests.sh
vendored
Executable file
@@ -0,0 +1,89 @@
#!/bin/sh

help () {
    cat <<EOF
Usage: $0 [OPTION] [PLATFORM]...
Run all the metatests whose platform matches any of the given PLATFORM.
A PLATFORM can contain shell wildcards.

Expected output: a lot of scary-looking error messages, since each
metatest is expected to report a failure. The final line should be
"Ran N metatests, all good."

If something goes wrong: the final line should be
"Ran N metatests, X unexpected successes". Look for "Unexpected success"
in the logs above.

  -l  List the available metatests, don't run them.
EOF
}

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

set -e -u

if [ -d programs ]; then
    METATEST_PROGRAM=programs/test/metatest
elif [ -d ../programs ]; then
    METATEST_PROGRAM=../programs/test/metatest
elif [ -d ../../programs ]; then
    METATEST_PROGRAM=../../programs/test/metatest
else
    echo >&2 "$0: FATAL: programs/test/metatest not found"
    exit 120
fi

LIST_ONLY=
while getopts hl OPTLET; do
    case $OPTLET in
        h) help; exit;;
        l) LIST_ONLY=1;;
        \?) help >&2; exit 120;;
    esac
done
shift $((OPTIND - 1))

list_matches () {
    while read name platform junk; do
        for pattern in "$@"; do
            case $platform in
                $pattern) echo "$name"; break;;
            esac
        done
    done
}

count=0
errors=0
run_metatest () {
    ret=0
    "$METATEST_PROGRAM" "$1" || ret=$?
    if [ $ret -eq 0 ]; then
        echo >&2 "$0: Unexpected success: $1"
        errors=$((errors + 1))
    fi
    count=$((count + 1))
}

# Don't pipe the output of metatest so that if it fails, this script exits
# immediately with a failure status.
full_list=$("$METATEST_PROGRAM" list)
matching_list=$(printf '%s\n' "$full_list" | list_matches "$@")

if [ -n "$LIST_ONLY" ]; then
    printf '%s\n' $matching_list
    exit
fi

for name in $matching_list; do
    run_metatest "$name"
done

if [ $errors -eq 0 ]; then
    echo "Ran $count metatests, all good."
    exit 0
else
    echo "Ran $count metatests, $errors unexpected successes."
    exit 1
fi
158
externals/mbedtls/tests/scripts/run-test-suites.pl
vendored
Executable file
@@ -0,0 +1,158 @@
#!/usr/bin/env perl

# run-test-suites.pl
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

=head1 SYNOPSIS

Execute all the test suites and print a summary of the results.

 run-test-suites.pl [[-v|--verbose] [VERBOSITY]] [--skip=SUITE[...]]

Options:

  -v|--verbose          Print detailed failure information.
  -v 2|--verbose=2      Print detailed failure information and summary messages.
  -v 3|--verbose=3      Print detailed information about every test case.
  --skip=SUITE[,SUITE...]
                        Skip the specified SUITE(s). This option can be used
                        multiple times.

=cut

use warnings;
use strict;

use utf8;
use open qw(:std utf8);

use Getopt::Long qw(:config auto_help gnu_compat);
use Pod::Usage;

my $verbose = 0;
my @skip_patterns = ();
GetOptions(
           'skip=s' => \@skip_patterns,
           'verbose|v:1' => \$verbose,
          ) or die;

# All test suites = executable files with a .datax file.
my @suites = ();
for my $data_file (glob 'test_suite_*.datax') {
    (my $base = $data_file) =~ s/\.datax$//;
    push @suites, $base if -x $base;
    push @suites, "$base.exe" if -e "$base.exe";
}
die "$0: no test suite found\n" unless @suites;

# "foo" as a skip pattern skips "test_suite_foo" and "test_suite_foo.bar"
# but not "test_suite_foobar".
my $skip_re =
    ( '\Atest_suite_(' .
      join('|', map {
          s/[ ,;]/|/g; # allow any of " ,;|" as separators
          s/\./\\./g; # "." in the input means ".", not "any character"
          $_
      } @skip_patterns) .
      ')(\z|\.)' );

# in case test suites are linked dynamically
$ENV{'LD_LIBRARY_PATH'} = '../library';
$ENV{'DYLD_LIBRARY_PATH'} = '../library';

my $prefix = $^O eq "MSWin32" ? '' : './';

my (@failed_suites, $total_tests_run, $failed, $suite_cases_passed,
    $suite_cases_failed, $suite_cases_skipped, $total_cases_passed,
    $total_cases_failed, $total_cases_skipped );
my $suites_skipped = 0;

sub pad_print_center {
    my( $width, $padchar, $string ) = @_;
    my $padlen = ( $width - length( $string ) - 2 ) / 2;
    print $padchar x( $padlen ), " $string ", $padchar x( $padlen ), "\n";
}

for my $suite (@suites)
{
    print "$suite ", "." x ( 72 - length($suite) - 2 - 4 ), " ";
    if( $suite =~ /$skip_re/o ) {
        print "SKIP\n";
        ++$suites_skipped;
        next;
    }

    my $command = "$prefix$suite";
    if( $verbose ) {
        $command .= ' -v';
    }
    my $result = `$command`;

    $suite_cases_passed = () = $result =~ /.. PASS/g;
    $suite_cases_failed = () = $result =~ /.. FAILED/g;
    $suite_cases_skipped = () = $result =~ /.. ----/g;

    if( $? == 0 ) {
        print "PASS\n";
        if( $verbose > 2 ) {
            pad_print_center( 72, '-', "Begin $suite" );
            print $result;
            pad_print_center( 72, '-', "End $suite" );
        }
    } else {
        push @failed_suites, $suite;
        print "FAIL\n";
        if( $verbose ) {
            pad_print_center( 72, '-', "Begin $suite" );
            print $result;
            pad_print_center( 72, '-', "End $suite" );
        }
    }

    my ($passed, $tests, $skipped) = $result =~ /([0-9]*) \/ ([0-9]*) tests.*?([0-9]*) skipped/;
    $total_tests_run += $tests - $skipped;

    if( $verbose > 1 ) {
        print "(test cases passed:", $suite_cases_passed,
              " failed:", $suite_cases_failed,
              " skipped:", $suite_cases_skipped,
              " of total:", ($suite_cases_passed + $suite_cases_failed +
                             $suite_cases_skipped),
              ")\n"
    }

    $total_cases_passed += $suite_cases_passed;
    $total_cases_failed += $suite_cases_failed;
    $total_cases_skipped += $suite_cases_skipped;
}

print "-" x 72, "\n";
print @failed_suites ? "FAILED" : "PASSED";
printf( " (%d suites, %d tests run%s)\n",
        scalar(@suites) - $suites_skipped,
        $total_tests_run,
        $suites_skipped ? ", $suites_skipped suites skipped" : "" );

if( $verbose && @failed_suites ) {
    # the output can be very long, so provide a summary of which suites failed
    print "  failed suites : @failed_suites\n";
}

if( $verbose > 1 ) {
    print "  test cases passed :", $total_cases_passed, "\n";
    print "             failed :", $total_cases_failed, "\n";
    print "            skipped :", $total_cases_skipped, "\n";
    print "  of tests executed :", ( $total_cases_passed + $total_cases_failed ),
        "\n";
    print " of available tests :",
        ( $total_cases_passed + $total_cases_failed + $total_cases_skipped ),
        "\n";
    if( $suites_skipped != 0 ) {
        print "Note: $suites_skipped suites were skipped.\n";
    }
}

exit( @failed_suites ? 1 : 0 );
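The skip-pattern regex deserves a gloss: each pattern may use space, comma or semicolon as alternation separators, dots are literal, and the anchors ensure 'foo' skips test_suite_foo and test_suite_foo.bar but not test_suite_foobar. A sketch of the same semantics re-expressed in Python, runnable as a quick check:

# Python sketch of the skip-pattern semantics from run-test-suites.pl.
import re

def build_skip_re(patterns):
    alts = []
    for p in patterns:
        p = re.sub(r'[ ,;]', '|', p)   # " ,;" act as alternation separators
        p = p.replace('.', r'\.')      # "." is literal, not "any character"
        alts.append(p)
    return re.compile(r'\Atest_suite_(' + '|'.join(alts) + r')(\Z|\.)')

skip_re = build_skip_re(['foo'])
for suite in ['test_suite_foo', 'test_suite_foo.bar', 'test_suite_foobar']:
    print(suite, bool(skip_re.match(suite)))
# test_suite_foo True / test_suite_foo.bar True / test_suite_foobar False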
63
externals/mbedtls/tests/scripts/run_demos.py
vendored
Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
"""Run the Mbed TLS demo scripts.
"""
import argparse
import glob
import subprocess
import sys


def run_demo(demo, quiet=False):
    """Run the specified demo script. Return True if it succeeds."""
    args = {}
    if quiet:
        args['stdout'] = subprocess.DEVNULL
        args['stderr'] = subprocess.DEVNULL
    returncode = subprocess.call([demo], **args)
    return returncode == 0


def run_demos(demos, quiet=False):
    """Run the specified demos and print summary information about failures.

    Return True if all demos passed and False if a demo fails.
    """
    failures = []
    for demo in demos:
        if not quiet:
            print('#### {} ####'.format(demo))
        success = run_demo(demo, quiet=quiet)
        if not success:
            failures.append(demo)
            if not quiet:
                print('{}: FAIL'.format(demo))
        if quiet:
            print('{}: {}'.format(demo, 'PASS' if success else 'FAIL'))
        else:
            print('')
    successes = len(demos) - len(failures)
    print('{}/{} demos passed'.format(successes, len(demos)))
    if failures and not quiet:
        print('Failures:', *failures)
    return not failures


def run_all_demos(quiet=False):
    """Run all the available demos.

    Return True if all demos passed and False if a demo fails.
    """
    all_demos = glob.glob('programs/*/*_demo.sh')
    if not all_demos:
        # Keep the message on one line. pylint: disable=line-too-long
        raise Exception('No demos found. run_demos needs to operate from the Mbed TLS toplevel directory.')
    return run_demos(all_demos, quiet=quiet)


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--quiet', '-q',
                        action='store_true',
                        help="suppress the output of demos")
    options = parser.parse_args()
    success = run_all_demos(quiet=options.quiet)
    sys.exit(0 if success else 1)


if __name__ == '__main__':
    main()
17
externals/mbedtls/tests/scripts/scripts_path.py
vendored
Normal file
@@ -0,0 +1,17 @@
"""Add our Python library directory to the module search path.

Usage:

    import scripts_path # pylint: disable=unused-import
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#

import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__),
                             os.path.pardir, os.path.pardir,
                             'scripts'))
284
externals/mbedtls/tests/scripts/set_psa_test_dependencies.py
vendored
Executable file
@@ -0,0 +1,284 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""Edit test cases to use PSA dependencies instead of classic dependencies.
|
||||
"""
|
||||
|
||||
# Copyright The Mbed TLS Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
CLASSIC_DEPENDENCIES = frozenset([
|
||||
# This list is manually filtered from mbedtls_config.h.
|
||||
|
||||
# Mbed TLS feature support.
|
||||
# Only features that affect what can be done are listed here.
|
||||
# Options that control optimizations or alternative implementations
|
||||
# are omitted.
|
||||
'MBEDTLS_CIPHER_MODE_CBC',
|
||||
'MBEDTLS_CIPHER_MODE_CFB',
|
||||
'MBEDTLS_CIPHER_MODE_CTR',
|
||||
'MBEDTLS_CIPHER_MODE_OFB',
|
||||
'MBEDTLS_CIPHER_MODE_XTS',
|
||||
'MBEDTLS_CIPHER_NULL_CIPHER',
|
||||
'MBEDTLS_CIPHER_PADDING_PKCS7',
|
||||
'MBEDTLS_CIPHER_PADDING_ONE_AND_ZEROS',
|
||||
'MBEDTLS_CIPHER_PADDING_ZEROS_AND_LEN',
|
||||
'MBEDTLS_CIPHER_PADDING_ZEROS',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP192R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP224R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP256R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP384R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP521R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP192K1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP224K1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_SECP256K1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_BP256R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_BP384R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_BP512R1_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_CURVE25519_ENABLED',
|
||||
#curve#'MBEDTLS_ECP_DP_CURVE448_ENABLED',
|
||||
'MBEDTLS_ECDSA_DETERMINISTIC',
|
||||
#'MBEDTLS_GENPRIME', #needed for RSA key generation
|
||||
'MBEDTLS_PKCS1_V15',
|
||||
'MBEDTLS_PKCS1_V21',
|
||||
|
||||
# Mbed TLS modules.
|
||||
# Only modules that provide cryptographic mechanisms are listed here.
|
||||
# Platform, data formatting, X.509 or TLS modules are omitted.
|
||||
'MBEDTLS_AES_C',
|
||||
'MBEDTLS_BIGNUM_C',
|
||||
'MBEDTLS_CAMELLIA_C',
|
||||
'MBEDTLS_ARIA_C',
|
||||
'MBEDTLS_CCM_C',
|
||||
'MBEDTLS_CHACHA20_C',
|
||||
'MBEDTLS_CHACHAPOLY_C',
|
||||
'MBEDTLS_CMAC_C',
|
||||
'MBEDTLS_CTR_DRBG_C',
|
||||
'MBEDTLS_DES_C',
|
||||
'MBEDTLS_DHM_C',
|
||||
'MBEDTLS_ECDH_C',
|
||||
'MBEDTLS_ECDSA_C',
|
||||
'MBEDTLS_ECJPAKE_C',
|
||||
'MBEDTLS_ECP_C',
|
||||
'MBEDTLS_ENTROPY_C',
|
||||
'MBEDTLS_GCM_C',
|
||||
'MBEDTLS_HKDF_C',
|
||||
'MBEDTLS_HMAC_DRBG_C',
|
||||
'MBEDTLS_NIST_KW_C',
|
||||
'MBEDTLS_MD5_C',
|
||||
'MBEDTLS_PKCS5_C',
|
||||
'MBEDTLS_PKCS12_C',
|
||||
'MBEDTLS_POLY1305_C',
|
||||
'MBEDTLS_RIPEMD160_C',
|
||||
'MBEDTLS_RSA_C',
|
||||
'MBEDTLS_SHA1_C',
|
||||
'MBEDTLS_SHA256_C',
|
||||
'MBEDTLS_SHA512_C',
|
||||
])
|
||||
|
||||
def is_classic_dependency(dep):
|
||||
"""Whether dep is a classic dependency that PSA test cases should not use."""
|
||||
if dep.startswith('!'):
|
||||
dep = dep[1:]
|
||||
return dep in CLASSIC_DEPENDENCIES
|
||||
|
||||
def is_systematic_dependency(dep):
|
||||
"""Whether dep is a PSA dependency which is determined systematically."""
|
||||
if dep.startswith('PSA_WANT_ECC_'):
|
||||
return False
|
||||
return dep.startswith('PSA_WANT_')
|
||||
|
||||
WITHOUT_SYSTEMATIC_DEPENDENCIES = frozenset([
|
||||
'PSA_ALG_AEAD_WITH_SHORTENED_TAG', # only a modifier
|
||||
'PSA_ALG_ANY_HASH', # only meaningful in policies
|
||||
'PSA_ALG_KEY_AGREEMENT', # only a way to combine algorithms
|
||||
'PSA_ALG_TRUNCATED_MAC', # only a modifier
|
||||
'PSA_KEY_TYPE_NONE', # not a real key type
|
||||
'PSA_KEY_TYPE_DERIVE', # always supported, don't list it to reduce noise
|
||||
'PSA_KEY_TYPE_RAW_DATA', # always supported, don't list it to reduce noise
|
||||
'PSA_ALG_AT_LEAST_THIS_LENGTH_MAC', #only a modifier
|
||||
'PSA_ALG_AEAD_WITH_AT_LEAST_THIS_LENGTH_TAG', #only a modifier
|
||||
])
|
||||
|
||||
SPECIAL_SYSTEMATIC_DEPENDENCIES = {
|
||||
'PSA_ALG_ECDSA_ANY': frozenset(['PSA_WANT_ALG_ECDSA']),
|
||||
'PSA_ALG_RSA_PKCS1V15_SIGN_RAW': frozenset(['PSA_WANT_ALG_RSA_PKCS1V15_SIGN']),
|
||||
}
|
||||
|
||||
def dependencies_of_symbol(symbol):
|
||||
"""Return the dependencies for a symbol that designates a cryptographic mechanism."""
|
||||
if symbol in WITHOUT_SYSTEMATIC_DEPENDENCIES:
|
||||
return frozenset()
|
||||
if symbol in SPECIAL_SYSTEMATIC_DEPENDENCIES:
|
||||
return SPECIAL_SYSTEMATIC_DEPENDENCIES[symbol]
|
||||
if symbol.startswith('PSA_ALG_CATEGORY_') or \
|
||||
symbol.startswith('PSA_KEY_TYPE_CATEGORY_'):
|
||||
# Categories are used in test data when an unsupported but plausible
|
||||
# mechanism number needed. They have no associated dependency.
|
||||
return frozenset()
|
||||
return {symbol.replace('_', '_WANT_', 1)}
|
||||

def systematic_dependencies(file_name, function_name, arguments):
    """List the systematically determined dependencies for a test case."""
    deps = set()

    # Run key policy negative tests even if the algorithm that the test
    # attempts to perform is not supported, except when the test checks an
    # incompatibility between a requested algorithm for a cryptographic
    # operation and a key policy. In the latter case, we want to filter out
    # the cases where PSA_ERROR_NOT_SUPPORTED is returned instead of
    # PSA_ERROR_NOT_PERMITTED.
    if function_name.endswith('_key_policy') and \
       arguments[-1].startswith('PSA_ERROR_') and \
       arguments[-1] != 'PSA_ERROR_NOT_PERMITTED':
        arguments[-2] = ''
    if function_name == 'copy_fail' and \
       arguments[-1].startswith('PSA_ERROR_'):
        arguments[-2] = ''
        arguments[-3] = ''

    # Storage format tests that only look at how the file is structured and
    # don't care about the format of the key material don't depend on any
    # cryptographic mechanisms.
    if os.path.basename(file_name) == 'test_suite_psa_crypto_persistent_key.data' and \
       function_name in {'format_storage_data_check',
                         'parse_storage_data_check'}:
        return []

    for arg in arguments:
        for symbol in re.findall(r'PSA_(?:ALG|KEY_TYPE)_\w+', arg):
            deps.update(dependencies_of_symbol(symbol))
    return sorted(deps)

def updated_dependencies(file_name, function_name, arguments, dependencies):
    """Rework the list of dependencies into PSA_WANT_xxx.

    Remove classic crypto dependencies such as MBEDTLS_RSA_C,
    MBEDTLS_PKCS1_V15, etc.

    Add systematic PSA_WANT_xxx dependencies based on the called function and
    its arguments, replacing existing PSA_WANT_xxx dependencies.
    """
    automatic = systematic_dependencies(file_name, function_name, arguments)
    manual = [dep for dep in dependencies
              if not (is_systematic_dependency(dep) or
                      is_classic_dependency(dep))]
    return automatic + manual

def keep_manual_dependencies(file_name, function_name, arguments):
    #pylint: disable=unused-argument
    """Declare test functions with unusual dependencies here."""
    # If there are no arguments, we can't do any useful work. Assume that if
    # there are dependencies, they are warranted.
    if not arguments:
        return True
    # When PSA_ERROR_NOT_SUPPORTED is expected, usually, at least one of the
    # constants mentioned in the test should not be supported. It isn't
    # possible to determine which one in a systematic way. So let the
    # programmer decide.
    if arguments[-1] == 'PSA_ERROR_NOT_SUPPORTED':
        return True
    return False

def process_data_stanza(stanza, file_name, test_case_number):
    """Update PSA crypto dependencies in one Mbed TLS test case.

    stanza is the test case text (including the description, the dependencies,
    the line with the function and arguments, and optionally comments). Return
    a new stanza with an updated dependency line, preserving everything else
    (description, comments, arguments, etc.).
    """
    if not stanza.lstrip('\n'):
        # Just blank lines
        return stanza
    # Expect 2 or 3 non-comment lines: description, optional dependencies,
    # function-and-arguments.
    content_matches = list(re.finditer(r'^[\t ]*([^\t #].*)$', stanza, re.M))
    if len(content_matches) < 2:
        raise Exception('Not enough content lines in paragraph {} in {}'
                        .format(test_case_number, file_name))
    if len(content_matches) > 3:
        raise Exception('Too many content lines in paragraph {} in {}'
                        .format(test_case_number, file_name))
    arguments = content_matches[-1].group(0).split(':')
    function_name = arguments.pop(0)
    if keep_manual_dependencies(file_name, function_name, arguments):
        return stanza
    if len(content_matches) == 2:
        # Insert a line for the dependencies. If it turns out that there are
        # no dependencies, we'll remove that empty line below.
        dependencies_location = content_matches[-1].start()
        text_before = stanza[:dependencies_location]
        text_after = '\n' + stanza[dependencies_location:]
        old_dependencies = []
        dependencies_leader = 'depends_on:'
    else:
        dependencies_match = content_matches[-2]
        text_before = stanza[:dependencies_match.start()]
        text_after = stanza[dependencies_match.end():]
        old_dependencies = dependencies_match.group(0).split(':')
        dependencies_leader = old_dependencies.pop(0) + ':'
        if dependencies_leader != 'depends_on:':
            raise Exception('Next-to-last line does not start with "depends_on:"'
                            ' in paragraph {} in {}'
                            .format(test_case_number, file_name))
    new_dependencies = updated_dependencies(file_name, function_name, arguments,
                                            old_dependencies)
    if new_dependencies:
        stanza = (text_before +
                  dependencies_leader + ':'.join(new_dependencies) +
                  text_after)
    else:
        # The dependencies have become empty. Remove the depends_on: line.
        assert text_after[0] == '\n'
        stanza = text_before + text_after[1:]
    return stanza
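
# Worked example (hypothetical test case): a .data stanza such as
#
#     PSA sign RSA PKCS#1 v1.5 SHA-256
#     depends_on:MBEDTLS_RSA_C:MBEDTLS_PKCS1_V15
#     sign_hash:PSA_KEY_TYPE_RSA_KEY_PAIR:PSA_ALG_RSA_PKCS1V15_SIGN(PSA_ALG_SHA_256)
#
# has its classic dependencies dropped and its depends_on: line rebuilt from
# the PSA_xxx symbols found in the arguments, becoming
#     depends_on:PSA_WANT_ALG_RSA_PKCS1V15_SIGN:PSA_WANT_ALG_SHA_256:PSA_WANT_KEY_TYPE_RSA_KEY_PAIR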

def process_data_file(file_name, old_content):
    """Update PSA crypto dependencies in an Mbed TLS test suite data file.

    Process old_content (the old content of the file) and return the new content.
    """
    old_stanzas = old_content.split('\n\n')
    new_stanzas = [process_data_stanza(stanza, file_name, n)
                   for n, stanza in enumerate(old_stanzas, start=1)]
    return '\n\n'.join(new_stanzas)

def update_file(file_name, old_content, new_content):
    """Update the given file with the given new content.

    Replace the existing file. The previous version is renamed to *.bak.
    Don't modify the file if the content was unchanged.
    """
    if new_content == old_content:
        return
    backup = file_name + '.bak'
    tmp = file_name + '.tmp'
    with open(tmp, 'w', encoding='utf-8') as new_file:
        new_file.write(new_content)
    os.replace(file_name, backup)
    os.replace(tmp, file_name)

def process_file(file_name):
    """Update PSA crypto dependencies in an Mbed TLS test suite data file.

    Replace the existing file. The previous version is renamed to *.bak.
    Don't modify the file if the content was unchanged.
    """
    old_content = open(file_name, encoding='utf-8').read()
    if file_name.endswith('.data'):
        new_content = process_data_file(file_name, old_content)
    else:
        raise Exception('File type not recognized: {}'
                        .format(file_name))
    update_file(file_name, old_content, new_content)

def main(args):
    for file_name in args:
        process_file(file_name)

if __name__ == '__main__':
    main(sys.argv[1:])
89
externals/mbedtls/tests/scripts/tcp_client.pl
vendored
Executable file
@@ -0,0 +1,89 @@
#!/usr/bin/env perl

# A simple TCP client that sends some data and expects a response.
# Usage: tcp_client.pl HOSTNAME PORT DATA1 RESPONSE1
# DATA1: hex-encoded data to send to the server
# RESPONSE1: hex-encoded data; the server's response must start with these bytes
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use warnings;
use strict;
use IO::Socket::INET;

# Pack hex digits into a binary string, ignoring whitespace.
sub parse_hex {
    my ($hex) = @_;
    $hex =~ s/\s+//g;
    return pack('H*', $hex);
}
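
# For instance, parse_hex('48 65 6c 6c 6f') returns the five bytes "Hello";
# whitespace between hex digits is ignored.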

## Open a TCP connection to the specified host and port.
sub open_connection {
    my ($host, $port) = @_;
    my $socket = IO::Socket::INET->new(PeerAddr => $host,
                                       PeerPort => $port,
                                       Proto => 'tcp',
                                       Timeout => 1);
    die "Cannot connect to $host:$port: $!" unless $socket;
    return $socket;
}

## Close the TCP connection.
sub close_connection {
    my ($connection) = @_;
    $connection->shutdown(2);
    # Ignore shutdown failures (at least for now)
    return 1;
}

## Write the given data, expressed as hexadecimal
sub write_data {
    my ($connection, $hexdata) = @_;
    my $data = parse_hex($hexdata);
    my $total_sent = 0;
    while ($total_sent < length($data)) {
        my $sent = $connection->send($data, 0);
        if (!defined $sent) {
            die "Unable to send data: $!";
        }
        $total_sent += $sent;
    }
    return 1;
}

## Read a response and check it against an expected prefix
sub read_response {
    my ($connection, $expected_hex) = @_;
    my $expected_data = parse_hex($expected_hex);
    my $start_offset = 0;
    while ($start_offset < length($expected_data)) {
        my $actual_data;
        my $ok = $connection->recv($actual_data, length($expected_data));
        if (!defined $ok) {
            die "Unable to receive data: $!";
        }
        if (($actual_data ^ substr($expected_data, $start_offset)) =~ /[^\000]/) {
            printf STDERR ("Received \\x%02x instead of \\x%02x at offset %d\n",
                           ord(substr($actual_data, $-[0], 1)),
                           ord(substr($expected_data, $start_offset + $-[0], 1)),
                           $start_offset + $-[0]);
            return 0;
        }
        $start_offset += length($actual_data);
    }
    return 1;
}

if (@ARGV != 4) {
    print STDERR "Usage: $0 HOSTNAME PORT DATA1 RESPONSE1\n";
    exit(3);
}
my ($host, $port, $data1, $response1) = @ARGV;
my $connection = open_connection($host, $port);
write_data($connection, $data1);
if (!read_response($connection, $response1)) {
    exit(1);
}
close_connection($connection);
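
## Worked example (hypothetical server): if a server listening on port 4433
## simply echoes back everything it receives, then
##     tests/scripts/tcp_client.pl localhost 4433 '48656c6c6f' '48656c6c6f'
## sends the five bytes "Hello" and exits successfully, since the response
## matches the expected bytes.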
161
externals/mbedtls/tests/scripts/test-ref-configs.pl
vendored
Executable file
@@ -0,0 +1,161 @@
#!/usr/bin/env perl

# test-ref-configs.pl
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# For each reference configuration file in the configs directory, build the
# configuration, run the test suites and compat.sh.
#
# Usage: tests/scripts/test-ref-configs.pl [config-name [...]]

use warnings;
use strict;

my %configs = (
    'config-ccm-psk-tls1_2.h' => {
        'compat' => '-m tls12 -f \'^TLS-PSK-WITH-AES-...-CCM-8\'',
        'test_again_with_use_psa' => 1
    },
    'config-ccm-psk-dtls1_2.h' => {
        'compat' => '-m dtls12 -f \'^TLS-PSK-WITH-AES-...-CCM-8\'',
        'opt' => ' ',
        'opt_needs_debug' => 1,
        'test_again_with_use_psa' => 1
    },
    'config-no-entropy.h' => {
    },
    'config-suite-b.h' => {
        'compat' => "-m tls12 -f 'ECDHE-ECDSA.*AES.*GCM' -p mbedTLS",
        'test_again_with_use_psa' => 1,
        'opt' => ' ',
        'opt_needs_debug' => 1,
    },
    'config-symmetric-only.h' => {
        'test_again_with_use_psa' => 0, # Uses PSA by default, no need to test it twice
    },
    'config-tfm.h' => {
        'test_again_with_use_psa' => 0, # Uses PSA by default, no need to test it twice
    },
    'config-thread.h' => {
        'opt' => '-f ECJPAKE.*nolog',
        'test_again_with_use_psa' => 1,
    },
);

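# Worked example (illustrative): running
#     tests/scripts/test-ref-configs.pl config-thread.h
# builds config-thread.h once as-is and once with MBEDTLS_PSA_CRYPTO_C and
# MBEDTLS_USE_PSA_CRYPTO enabled (because test_again_with_use_psa is set),
# running the test suites and "ssl-opt.sh -f ECJPAKE.*nolog" for each build;
# compat.sh is skipped because no 'compat' entry is defined.
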
# If no config-name is provided, use all known configs.
# Otherwise, use the provided names only.
my @configs_to_test = sort keys %configs;
if ($#ARGV >= 0) {
    foreach my $conf_name ( @ARGV ) {
        if( ! exists $configs{$conf_name} ) {
            die "Unknown configuration: $conf_name\n";
        }
    }
    @configs_to_test = @ARGV;
}

-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";

my $config_h = 'include/mbedtls/mbedtls_config.h';

system( "cp $config_h $config_h.bak" ) and die;
sub abort {
    system( "mv $config_h.bak $config_h" ) and warn "$config_h not restored\n";
    # use an exit code between 1 and 124 for git bisect (die returns 255)
    warn $_[0];
    exit 1;
}

# Create a seedfile for configurations that enable MBEDTLS_ENTROPY_NV_SEED.
# For test purposes, this doesn't have to be cryptographically random.
if (!-e "tests/seedfile" || -s "tests/seedfile" < 64) {
    local *SEEDFILE;
    open SEEDFILE, ">tests/seedfile" or die;
    print SEEDFILE "*" x 64 or die;
    close SEEDFILE or die;
}

sub perform_test {
    my $conf_file = $_[0];
    my $data = $_[1];
    my $test_with_psa = $_[2];

    my $conf_name = $conf_file;
    if ( $test_with_psa )
    {
        $conf_name .= "+PSA";
    }

    system( "cp $config_h.bak $config_h" ) and die;
    system( "make clean" ) and die;

    print "\n******************************************\n";
    print "* Testing configuration: $conf_name\n";
    print "******************************************\n";

    $ENV{MBEDTLS_TEST_CONFIGURATION} = $conf_name;

    system( "cp configs/$conf_file $config_h" )
        and abort "Failed to activate $conf_file\n";

    if ( $test_with_psa )
    {
        system( "scripts/config.py set MBEDTLS_PSA_CRYPTO_C" );
        system( "scripts/config.py set MBEDTLS_USE_PSA_CRYPTO" );
    }

    system( "CFLAGS='-Os -Werror -Wall -Wextra' make" ) and abort "Failed to build: $conf_name\n";
    system( "make test" ) and abort "Failed test suite: $conf_name\n";

    my $compat = $data->{'compat'};
    if( $compat )
    {
        print "\nrunning compat.sh $compat ($conf_name)\n";
        system( "tests/compat.sh $compat" )
            and abort "Failed compat.sh: $conf_name\n";
    }
    else
    {
        print "\nskipping compat.sh ($conf_name)\n";
    }

    my $opt = $data->{'opt'};
    if( $opt )
    {
        if( $data->{'opt_needs_debug'} )
        {
            print "\nrebuilding with debug traces for ssl-opt ($conf_name)\n";
            $conf_name .= '+DEBUG';
            $ENV{MBEDTLS_TEST_CONFIGURATION} = $conf_name;
            system( "make clean" );
            system( "scripts/config.py set MBEDTLS_DEBUG_C" );
            system( "scripts/config.py set MBEDTLS_ERROR_C" );
            system( "CFLAGS='-Os -Werror -Wall -Wextra' make" ) and abort "Failed to build: $conf_name\n";
        }

        print "\nrunning ssl-opt.sh $opt ($conf_name)\n";
        system( "tests/ssl-opt.sh $opt" )
            and abort "Failed ssl-opt.sh: $conf_name\n";
    }
    else
    {
        print "\nskipping ssl-opt.sh ($conf_name)\n";
    }
}

foreach my $conf ( @configs_to_test ) {
    my $test_with_psa = $configs{$conf}{'test_again_with_use_psa'};
    if ( $test_with_psa )
    {
        perform_test( $conf, $configs{$conf}, $test_with_psa );
    }
    perform_test( $conf, $configs{$conf}, 0 );
}

system( "mv $config_h.bak $config_h" ) and warn "$config_h not restored\n";
system( "make clean" );
exit 0;
175
externals/mbedtls/tests/scripts/test_config_script.py
vendored
Executable file
@@ -0,0 +1,175 @@
#!/usr/bin/env python3

"""Test helper for the Mbed TLS configuration file tool

Run config.py with various parameters and write the results to files.

This is a harness to help regression testing, not a functional tester.
Sample usage:

test_config_script.py -d old
## Modify config.py and/or mbedtls_config.h ##
test_config_script.py -d new
diff -ru old new
"""

## Copyright The Mbed TLS Contributors
## SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
##

import argparse
import glob
import os
import re
import shutil
import subprocess

OUTPUT_FILE_PREFIX = 'config-'

def output_file_name(directory, stem, extension):
    return os.path.join(directory,
                        '{}{}.{}'.format(OUTPUT_FILE_PREFIX,
                                         stem, extension))

def cleanup_directory(directory):
    """Remove old output files."""
    # Remove the files that run_one() writes (assumed set of extensions:
    # h, out, err, status).
    for extension in ['h', 'out', 'err', 'status']:
        pattern = output_file_name(directory, '*', extension)
        filenames = glob.glob(pattern)
        for filename in filenames:
            os.remove(filename)

def prepare_directory(directory):
    """Create the output directory if it doesn't exist yet.

    If there are old output files, remove them.
    """
    if os.path.exists(directory):
        cleanup_directory(directory)
    else:
        os.makedirs(directory)

def guess_presets_from_help(help_text):
    """Figure out what presets the script supports.

    help_text should be the output from running the script with --help.
    """
    # Try the output format from config.py
    hits = re.findall(r'\{([-\w,]+)\}', help_text)
    for hit in hits:
        words = set(hit.split(','))
        if 'get' in words and 'set' in words and 'unset' in words:
            words.remove('get')
            words.remove('set')
            words.remove('unset')
            return words
    # Try the output format from config.pl
    hits = re.findall(r'\n +([-\w]+) +- ', help_text)
    if hits:
        return hits
    raise Exception("Unable to figure out supported presets. Pass the '-p' option.")

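# Worked example (illustrative): an argparse-style help text containing
#     {get,set,unset,full,baremetal}
# yields {'full', 'baremetal'} once the get/set/unset subcommands are removed.
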
def list_presets(options):
    """Return the list of presets to test.

    The list is taken from the command line if present, otherwise it is
    extracted from running the config script with --help.
    """
    if options.presets:
        return re.split(r'[ ,]+', options.presets)
    else:
        help_text = subprocess.run([options.script, '--help'],
                                   check=False, # config.pl --help returns 255
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT).stdout
        return guess_presets_from_help(help_text.decode('ascii'))

def run_one(options, args, stem_prefix='', input_file=None):
    """Run the config script with the given arguments.

    Take the original content from input_file if specified, defaulting
    to options.input_file if input_file is None.

    Write the following files, where xxx contains stem_prefix followed by
    a filename-friendly encoding of args:
    * config-xxx.h: modified file.
    * config-xxx.out: standard output.
    * config-xxx.err: standard error.
    * config-xxx.status: exit code.

    Return ("xxx+", "path/to/config-xxx.h") which can be used as
    stem_prefix and input_file to call this function again with new args.
    """
    if input_file is None:
        input_file = options.input_file
    stem = stem_prefix + '-'.join(args)
    data_filename = output_file_name(options.output_directory, stem, 'h')
    stdout_filename = output_file_name(options.output_directory, stem, 'out')
    stderr_filename = output_file_name(options.output_directory, stem, 'err')
    status_filename = output_file_name(options.output_directory, stem, 'status')
    shutil.copy(input_file, data_filename)
    # Pass only the file basename, not the full path, to avoid getting the
    # directory name in error messages, which would make comparisons
    # between output directories more difficult.
    cmd = [os.path.abspath(options.script),
           '-f', os.path.basename(data_filename)]
    with open(stdout_filename, 'wb') as out:
        with open(stderr_filename, 'wb') as err:
            status = subprocess.call(cmd + args,
                                     cwd=options.output_directory,
                                     stdin=subprocess.DEVNULL,
                                     stdout=out, stderr=err)
    with open(status_filename, 'w') as status_file:
        status_file.write('{}\n'.format(status))
    return stem + "+", data_filename

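# Worked example (illustrative): run_one(options, ['set', 'MBEDTLS_AES_C'])
# writes config-set-MBEDTLS_AES_C.h (plus .out, .err and .status files) and
# returns ('set-MBEDTLS_AES_C+', '<dir>/config-set-MBEDTLS_AES_C.h'); passing
# these back in as stem_prefix and input_file makes a follow-up
# run_one(options, ['get', 'MBEDTLS_AES_C'], ...) write
# config-set-MBEDTLS_AES_C+get-MBEDTLS_AES_C.h, and so on.
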
### A list of symbols to test with.
### This script currently tests what happens when you change a symbol from
### having a value to not having a value or vice versa. This is not
### necessarily useful behavior, and we may not consider it a bug if
### config.py stops handling that case correctly.
TEST_SYMBOLS = [
    'CUSTOM_SYMBOL', # does not exist
    'MBEDTLS_AES_C', # set, no value
    'MBEDTLS_MPI_MAX_SIZE', # unset, has a value
    'MBEDTLS_NO_UDBL_DIVISION', # unset, in "System support"
    'MBEDTLS_PLATFORM_ZEROIZE_ALT', # unset, in "Customisation configuration options"
]

def run_all(options):
    """Run all the command lines to test."""
    presets = list_presets(options)
    for preset in presets:
        run_one(options, [preset])
    for symbol in TEST_SYMBOLS:
        run_one(options, ['get', symbol])
        (stem, filename) = run_one(options, ['set', symbol])
        run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
        run_one(options, ['--force', 'set', symbol])
        (stem, filename) = run_one(options, ['set', symbol, 'value'])
        run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
        run_one(options, ['--force', 'set', symbol, 'value'])
        run_one(options, ['unset', symbol])

def main():
    """Command line entry point."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', metavar='DIR',
                        dest='output_directory', required=True,
                        help="""Output directory.""")
    parser.add_argument('-f', metavar='FILE',
                        dest='input_file', default='include/mbedtls/mbedtls_config.h',
                        help="""Config file (default: %(default)s).""")
    parser.add_argument('-p', metavar='PRESET,...',
                        dest='presets',
                        help="""Presets to test (default: guessed from --help).""")
    parser.add_argument('-s', metavar='FILE',
                        dest='script', default='scripts/config.py',
                        help="""Configuration script (default: %(default)s).""")
    options = parser.parse_args()
    prepare_directory(options.output_directory)
    run_all(options)

if __name__ == '__main__':
    main()
1915
externals/mbedtls/tests/scripts/test_generate_test_code.py
vendored
Executable file
File diff suppressed because it is too large
Load Diff
159
externals/mbedtls/tests/scripts/test_psa_compliance.py
vendored
Executable file
159
externals/mbedtls/tests/scripts/test_psa_compliance.py
vendored
Executable file
@@ -0,0 +1,159 @@
#!/usr/bin/env python3
"""Run the PSA Crypto API compliance test suite.
Clone the repo and check out the commit specified by PSA_ARCH_TEST_REPO and PSA_ARCH_TEST_REF,
then compile and run the test suite. The clone is stored at <repository root>/psa-arch-tests.
Known defects in either the test suite or mbedtls / TF-PSA-Crypto, identified by their test
number, are ignored, while unexpected failures AND successes are reported as errors, to help
keep the list of known defects as up to date as possible.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import argparse
import os
import re
import shutil
import subprocess
import sys
from typing import List

#pylint: disable=unused-import
import scripts_path
from mbedtls_dev import build_tree

# PSA Compliance tests we expect to fail due to known defects in Mbed TLS /
# TF-PSA-Crypto (or the test suite).
# The test numbers correspond to the numbers used by the console output of the test suite.
# Test number 2xx corresponds to the files in the folder
# psa-arch-tests/api-tests/dev_apis/crypto/test_c0xx
EXPECTED_FAILURES = {} # type: dict

PSA_ARCH_TESTS_REPO = 'https://github.com/ARM-software/psa-arch-tests.git'
PSA_ARCH_TESTS_REF = 'v23.06_API1.5_ADAC_EAC'

#pylint: disable=too-many-branches,too-many-statements,too-many-locals
def main(library_build_dir: str):
    root_dir = os.getcwd()

    in_tf_psa_crypto_repo = build_tree.looks_like_tf_psa_crypto_root(root_dir)

    crypto_name = build_tree.crypto_library_filename(root_dir)
    library_subdir = build_tree.crypto_core_directory(root_dir, relative=True)

    crypto_lib_filename = (library_build_dir + '/' +
                           library_subdir + '/' +
                           'lib' + crypto_name + '.a')

    if not os.path.exists(crypto_lib_filename):
        #pylint: disable=bad-continuation
        subprocess.check_call([
            'cmake', '.',
            '-GUnix Makefiles',
            '-B' + library_build_dir
        ])
        subprocess.check_call(['cmake', '--build', library_build_dir,
                               '--target', crypto_name])

    psa_arch_tests_dir = 'psa-arch-tests'
    os.makedirs(psa_arch_tests_dir, exist_ok=True)
    try:
        os.chdir(psa_arch_tests_dir)

        # Reuse existing local clone
        subprocess.check_call(['git', 'init'])
        subprocess.check_call(['git', 'fetch', PSA_ARCH_TESTS_REPO, PSA_ARCH_TESTS_REF])
        subprocess.check_call(['git', 'checkout', 'FETCH_HEAD'])

        build_dir = 'api-tests/build'
        try:
            shutil.rmtree(build_dir)
        except FileNotFoundError:
            pass
        os.mkdir(build_dir)
        os.chdir(build_dir)

        extra_includes = (';{}/drivers/builtin/include'.format(root_dir)
                          if in_tf_psa_crypto_repo else '')

        #pylint: disable=bad-continuation
        subprocess.check_call([
            'cmake', '..',
            '-GUnix Makefiles',
            '-DTARGET=tgt_dev_apis_stdc',
            '-DTOOLCHAIN=HOST_GCC',
            '-DSUITE=CRYPTO',
            '-DPSA_CRYPTO_LIB_FILENAME={}/{}'.format(root_dir,
                                                     crypto_lib_filename),
            ('-DPSA_INCLUDE_PATHS={}/include' + extra_includes).format(root_dir)
        ])
        subprocess.check_call(['cmake', '--build', '.'])

        proc = subprocess.Popen(['./psa-arch-tests-crypto'],
                                bufsize=1, stdout=subprocess.PIPE, universal_newlines=True)

        test_re = re.compile(
            '^TEST: (?P<test_num>[0-9]*)|'
            '^TEST RESULT: (?P<test_result>FAILED|PASSED)'
        )
        test = -1
        unexpected_successes = set(EXPECTED_FAILURES)
        expected_failures = [] # type: List[int]
        unexpected_failures = [] # type: List[int]
        if proc.stdout is None:
            return 1

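        # Worked example (illustrative output): a console line such as
        #     TEST: 201
        # sets test = 201; a later line such as
        #     TEST RESULT: FAILED
        # then records test 201 as an expected or unexpected failure,
        # depending on whether it appears in EXPECTED_FAILURES.
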
        for line in proc.stdout:
            print(line, end='')
            match = test_re.match(line)
            if match is not None:
                groupdict = match.groupdict()
                test_num = groupdict['test_num']
                if test_num is not None:
                    test = int(test_num)
                elif groupdict['test_result'] == 'FAILED':
                    try:
                        unexpected_successes.remove(test)
                        expected_failures.append(test)
                        print('Expected failure, ignoring')
                    except KeyError:
                        unexpected_failures.append(test)
                        print('ERROR: Unexpected failure')
                elif test in unexpected_successes:
                    print('ERROR: Unexpected success')
        proc.wait()

        print()
        print('***** test_psa_compliance.py report ******')
        print()
        print('Expected failures:', ', '.join(str(i) for i in expected_failures))
        print('Unexpected failures:', ', '.join(str(i) for i in unexpected_failures))
        print('Unexpected successes:', ', '.join(str(i) for i in sorted(unexpected_successes)))
        print()
        if unexpected_successes or unexpected_failures:
            if unexpected_successes:
                print('Unexpected successes encountered.')
                print('Please remove the corresponding tests from '
                      'EXPECTED_FAILURES in tests/scripts/test_psa_compliance.py')
                print()
            print('FAILED')
            return 1
        else:
            print('SUCCESS')
            return 0
    finally:
        os.chdir(root_dir)

if __name__ == '__main__':
    BUILD_DIR = 'out_of_source_build'

    # pylint: disable=invalid-name
    parser = argparse.ArgumentParser()
    parser.add_argument('--build-dir', nargs=1,
                        help='path to Mbed TLS / TF-PSA-Crypto build directory')
    args = parser.parse_args()

    if args.build_dir is not None:
        BUILD_DIR = args.build_dir[0]

    sys.exit(main(BUILD_DIR))
191
externals/mbedtls/tests/scripts/test_psa_constant_names.py
vendored
Executable file
@@ -0,0 +1,191 @@
#!/usr/bin/env python3
"""Test the program psa_constant_names.
Gather constant names from header files and test cases. Compile a C program
to print out their numerical values, feed these numerical values to
psa_constant_names, and check that the output is the original name.
Return 0 if all test cases pass, 1 if the output was not always as expected,
or 1 (with a Python backtrace) if there was an operational error.
"""

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

import argparse
from collections import namedtuple
import os
import re
import subprocess
import sys
from typing import Iterable, List, Optional, Tuple

import scripts_path # pylint: disable=unused-import
from mbedtls_dev import c_build_helper
from mbedtls_dev.macro_collector import InputsForTest, PSAMacroEnumerator
from mbedtls_dev import typing_util

def gather_inputs(headers: Iterable[str],
                  test_suites: Iterable[str],
                  inputs_class=InputsForTest) -> PSAMacroEnumerator:
    """Read the list of inputs to test psa_constant_names with."""
    inputs = inputs_class()
    for header in headers:
        inputs.parse_header(header)
    for test_cases in test_suites:
        inputs.parse_test_cases(test_cases)
    inputs.add_numerical_values()
    inputs.gather_arguments()
    return inputs

def run_c(type_word: str,
          expressions: Iterable[str],
          include_path: Optional[str] = None,
          keep_c: bool = False) -> List[str]:
    """Generate and run a program to print out numerical values of C expressions."""
    if type_word == 'status':
        cast_to = 'long'
        printf_format = '%ld'
    else:
        cast_to = 'unsigned long'
        printf_format = '0x%08lx'
    return c_build_helper.get_c_expression_values(
        cast_to, printf_format,
        expressions,
        caller='test_psa_constant_names.py for {} values'.format(type_word),
        file_label=type_word,
        header='#include <psa/crypto.h>',
        include_path=include_path,
        keep_c=keep_c
    )

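# Worked example (illustrative values): for an 'algorithm' expression such as
# 'PSA_ALG_SHA_256', the generated C program prints its value with the
# '0x%08lx' format, e.g. '0x02000009', and psa_constant_names is expected to
# turn that number back into the string 'PSA_ALG_SHA_256'.
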
NORMALIZE_STRIP_RE = re.compile(r'\s+')
def normalize(expr: str) -> str:
    """Normalize the C expression so as not to care about trivial differences.

    Currently "trivial differences" means whitespace.
    """
    return re.sub(NORMALIZE_STRIP_RE, '', expr)

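# For instance, normalize('PSA_ALG_HMAC( PSA_ALG_SHA_256 )') and
# normalize('PSA_ALG_HMAC(PSA_ALG_SHA_256)') both yield
# 'PSA_ALG_HMAC(PSA_ALG_SHA_256)', so the two spellings compare equal.
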
ALG_TRUNCATED_TO_SELF_RE = \
    re.compile(r'PSA_ALG_AEAD_WITH_SHORTENED_TAG\('
               r'PSA_ALG_(?:CCM|CHACHA20_POLY1305|GCM)'
               r', *16\)\Z')

def is_simplifiable(expr: str) -> bool:
    """Determine whether an expression is simplifiable.

    Simplifiable expressions can't be output in their input form, since
    the output will be the simple form. Therefore they must be excluded
    from testing.
    """
    if ALG_TRUNCATED_TO_SELF_RE.match(expr):
        return True
    return False

def collect_values(inputs: InputsForTest,
                   type_word: str,
                   include_path: Optional[str] = None,
                   keep_c: bool = False) -> Tuple[List[str], List[str]]:
    """Generate expressions using known macro names and calculate their values.

    Return two parallel lists (expressions, values) where each expression
    is matched with a string representation of its integer value.
    """
    names = inputs.get_names(type_word)
    expressions = sorted(expr
                         for expr in inputs.generate_expressions(names)
                         if not is_simplifiable(expr))
    values = run_c(type_word, expressions,
                   include_path=include_path, keep_c=keep_c)
    return expressions, values

class Tests:
    """An object representing tests and their results."""

    Error = namedtuple('Error',
                       ['type', 'expression', 'value', 'output'])

    def __init__(self, options) -> None:
        self.options = options
        self.count = 0
        self.errors = [] #type: List[Tests.Error]

    def run_one(self, inputs: InputsForTest, type_word: str) -> None:
        """Test psa_constant_names for the specified type.

        Run the program on the names for this type.
        Use the inputs to figure out what arguments to pass to macros that
        take arguments.
        """
        expressions, values = collect_values(inputs, type_word,
                                             include_path=self.options.include,
                                             keep_c=self.options.keep_c)
        output_bytes = subprocess.check_output([self.options.program,
                                                type_word] + values)
        output = output_bytes.decode('ascii')
        outputs = output.strip().split('\n')
        self.count += len(expressions)
        for expr, value, output in zip(expressions, values, outputs):
            if self.options.show:
                sys.stdout.write('{} {}\t{}\n'.format(type_word, value, output))
            if normalize(expr) != normalize(output):
                self.errors.append(self.Error(type=type_word,
                                              expression=expr,
                                              value=value,
                                              output=output))

    def run_all(self, inputs: InputsForTest) -> None:
        """Run psa_constant_names on all the gathered inputs."""
        for type_word in ['status', 'algorithm', 'ecc_curve', 'dh_group',
                          'key_type', 'key_usage']:
            self.run_one(inputs, type_word)

    def report(self, out: typing_util.Writable) -> None:
        """Describe each case where the output is not as expected.

        Write the errors to ``out``.
        Also write a total.
        """
        for error in self.errors:
            out.write('For {} "{}", got "{}" (value: {})\n'
                      .format(error.type, error.expression,
                              error.output, error.value))
        out.write('{} test cases'.format(self.count))
        if self.errors:
            out.write(', {} FAIL\n'.format(len(self.errors)))
        else:
            out.write(' PASS\n')

HEADERS = ['psa/crypto.h', 'psa/crypto_extra.h', 'psa/crypto_values.h']
TEST_SUITES = ['tests/suites/test_suite_psa_crypto_metadata.data']

def main():
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--include', '-I',
                        action='append', default=['include'],
                        help='Directory for header files')
    parser.add_argument('--keep-c',
                        action='store_true', dest='keep_c', default=False,
                        help='Keep the intermediate C file')
    parser.add_argument('--no-keep-c',
                        action='store_false', dest='keep_c',
                        help='Don\'t keep the intermediate C file (default)')
    parser.add_argument('--program',
                        default='programs/psa/psa_constant_names',
                        help='Program to test')
    parser.add_argument('--show',
                        action='store_true',
                        help='Show tested values on stdout')
    parser.add_argument('--no-show',
                        action='store_false', dest='show',
                        help='Don\'t show tested values (default)')
    options = parser.parse_args()
    headers = [os.path.join(options.include[0], h) for h in HEADERS]
    inputs = gather_inputs(headers, TEST_SUITES)
    tests = Tests(options)
    tests.run_all(inputs)
    tests.report(sys.stdout)
    if tests.errors:
        sys.exit(1)

if __name__ == '__main__':
    main()
64
externals/mbedtls/tests/scripts/test_zeroize.gdb
vendored
Normal file
@@ -0,0 +1,64 @@
# test_zeroize.gdb
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# Run a test using the debugger to check that the mbedtls_platform_zeroize()
# function in platform_util.h is not being optimized out by the compiler. To do
# so, the script loads the test program at programs/test/zeroize.c and sets a
# breakpoint at the last return statement in main(). When the breakpoint is
# hit, the debugger inspects the buffer that should have been zeroized and
# checks that it is actually cleared.
#
# The mbedtls_platform_zeroize() test is debugger driven because there does not
# seem to be a mechanism to reliably check whether the zeroize calls are being
# eliminated by compiler optimizations from within the compiled program. The
# problem is that a compiler would typically remove what it considers to be
# "unnecessary" assignments as part of redundant code elimination. To identify
# such code, the compiler will create some form of dependency graph between
# reads and writes to variables (among other situations). It will then use this
# data structure to remove redundant code that does not have an impact on the
# program's observable behavior. In the case of mbedtls_platform_zeroize(), an
# intelligent compiler could determine that this function clears a block of
# memory that is not accessed later in the program, so removing the call to
# mbedtls_platform_zeroize() does not change the program's observable behavior.
# However, inserting a check after a call to mbedtls_platform_zeroize() to see
# whether the block of memory was correctly zeroed forces the compiler to not
# eliminate the mbedtls_platform_zeroize() call. If the buffer is nevertheless
# not cleared, then the compiler potentially has a bug.
#
# Note: This test requires that the test program is compiled with -g3.

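# Typical invocation (illustrative; from the source tree root, after building
# programs/test/zeroize with -g3):
#     gdb -batch -nx -x tests/scripts/test_zeroize.gdb
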
set confirm off

file ./programs/test/zeroize

search GDB_BREAK_HERE
break $_

set args ./programs/test/zeroize.c
run

set $i = 0
set $len = sizeof(buf)
set $buf = buf

while $i < $len
    if $buf[$i++] != 0
        echo The buffer was not zeroized\n
        quit 1
    end
end

echo The buffer was correctly zeroized\n

continue

if $_exitcode != 0
    echo The program did not terminate correctly\n
    quit 1
end

quit 0
180
externals/mbedtls/tests/scripts/translate_ciphers.py
vendored
Executable file
@@ -0,0 +1,180 @@
#!/usr/bin/env python3

# translate_ciphers.py
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

"""
Translate standard ciphersuite names to GnuTLS, OpenSSL and Mbed TLS standards.

To test the translation functions run:
python3 -m unittest translate_ciphers.py
"""

import re
import argparse
import unittest

class TestTranslateCiphers(unittest.TestCase):
    """
    Ensure translate_ciphers.py translates and formats ciphersuite names
    correctly.
    """
    def test_translate_all_cipher_names(self):
        """
        Translate standard ciphersuite names to their GnuTLS, OpenSSL and
        Mbed TLS counterparts. Use only a small subset of ciphers
        that exercise each step of the translation functions.
        """
        ciphers = [
            ("TLS_ECDHE_ECDSA_WITH_NULL_SHA",
             "+ECDHE-ECDSA:+NULL:+SHA1",
             "ECDHE-ECDSA-NULL-SHA",
             "TLS-ECDHE-ECDSA-WITH-NULL-SHA"),
            ("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
             "+ECDHE-ECDSA:+AES-128-GCM:+AEAD",
             "ECDHE-ECDSA-AES128-GCM-SHA256",
             "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256"),
            ("TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
             "+DHE-RSA:+3DES-CBC:+SHA1",
             "EDH-RSA-DES-CBC3-SHA",
             "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA"),
            ("TLS_RSA_WITH_AES_256_CBC_SHA",
             "+RSA:+AES-256-CBC:+SHA1",
             "AES256-SHA",
             "TLS-RSA-WITH-AES-256-CBC-SHA"),
            ("TLS_PSK_WITH_3DES_EDE_CBC_SHA",
             "+PSK:+3DES-CBC:+SHA1",
             "PSK-3DES-EDE-CBC-SHA",
             "TLS-PSK-WITH-3DES-EDE-CBC-SHA"),
            ("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
             None,
             "ECDHE-ECDSA-CHACHA20-POLY1305",
             "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256"),
            ("TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
             "+ECDHE-ECDSA:+AES-128-CCM:+AEAD",
             None,
             "TLS-ECDHE-ECDSA-WITH-AES-128-CCM"),
            ("TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384",
             None,
             "ECDHE-ARIA256-GCM-SHA384",
             "TLS-ECDHE-RSA-WITH-ARIA-256-GCM-SHA384"),
        ]

        for s, g_exp, o_exp, m_exp in ciphers:

            if g_exp is not None:
                g = translate_gnutls(s)
                self.assertEqual(g, g_exp)

            if o_exp is not None:
                o = translate_ossl(s)
                self.assertEqual(o, o_exp)

            if m_exp is not None:
                m = translate_mbedtls(s)
                self.assertEqual(m, m_exp)

def translate_gnutls(s_cipher):
    """
    Translate s_cipher from the standard ciphersuite naming convention
    and return the GnuTLS naming convention.
    """

    # Replace "_" with "-" to handle ciphersuite names based on Mbed TLS
    # naming convention
    s_cipher = s_cipher.replace("_", "-")

    s_cipher = re.sub(r'\ATLS-', '+', s_cipher)
    s_cipher = s_cipher.replace("-WITH-", ":+")
    s_cipher = s_cipher.replace("-EDE", "")

    # SHA in Mbed TLS == SHA1 in GnuTLS:
    # if the last 3 chars are SHA, append 1
    if s_cipher[-3:] == "SHA":
        s_cipher = s_cipher+"1"

    # CCM or CCM-8 should be followed by ":+AEAD";
    # replace "GCM:+SHAxyz" with "GCM:+AEAD"
    if "CCM" in s_cipher or "GCM" in s_cipher:
        s_cipher = re.sub(r"GCM-SHA\d\d\d", "GCM", s_cipher)
        s_cipher = s_cipher+":+AEAD"

    # Replace the last "-" with ":+"
    else:
        index = s_cipher.rindex("-")
        s_cipher = s_cipher[:index] + ":+" + s_cipher[index+1:]

    return s_cipher

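# Worked example (from the test data above): translate_gnutls maps
# "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" first to
# "+ECDHE-ECDSA:+AES-128-GCM-SHA256" (prefix and -WITH- rewritten), then to
# "+ECDHE-ECDSA:+AES-128-GCM:+AEAD" (GCM-SHA256 collapsed to GCM plus :+AEAD).
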
def translate_ossl(s_cipher):
    """
    Translate s_cipher from the standard ciphersuite naming convention
    and return the OpenSSL naming convention.
    """

    # Replace "_" with "-" to handle ciphersuite names based on Mbed TLS
    # naming convention
    s_cipher = s_cipher.replace("_", "-")

    s_cipher = re.sub(r'^TLS-', '', s_cipher)
    s_cipher = s_cipher.replace("-WITH", "")

    # Remove the "-" from "ABC-xyz"
    s_cipher = s_cipher.replace("AES-", "AES")
    s_cipher = s_cipher.replace("CAMELLIA-", "CAMELLIA")
    s_cipher = s_cipher.replace("ARIA-", "ARIA")

    # Remove "RSA" if it is at the beginning
    s_cipher = re.sub(r'^RSA-', r'', s_cipher)

    # For all circumstances outside of PSK
    if "PSK" not in s_cipher:
        s_cipher = s_cipher.replace("-EDE", "")
        s_cipher = s_cipher.replace("3DES-CBC", "DES-CBC3")

        # Remove "CBC" if it is not prefixed by DES
        s_cipher = re.sub(r'(?<!DES-)CBC-', r'', s_cipher)

    # ECDHE-RSA-ARIA does not exist in OpenSSL
    s_cipher = s_cipher.replace("ECDHE-RSA-ARIA", "ECDHE-ARIA")

    # POLY1305 should not be followed by anything
    if "POLY1305" in s_cipher:
        index = s_cipher.rindex("POLY1305")
        s_cipher = s_cipher[:index+8]

    # If DES is being used, replace DHE with EDH
    if "DES" in s_cipher and "DHE" in s_cipher and "ECDHE" not in s_cipher:
        s_cipher = s_cipher.replace("DHE", "EDH")

    return s_cipher

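# Worked example (from the test data above): translate_ossl turns
# "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA" into "DHE-RSA-3DES-EDE-CBC-SHA"
# (prefix and -WITH removed), then "DHE-RSA-DES-CBC3-SHA" (-EDE dropped,
# 3DES-CBC renamed), and finally "EDH-RSA-DES-CBC3-SHA" (DHE becomes EDH
# because DES is in use).
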
def translate_mbedtls(s_cipher):
    """
    Translate s_cipher from the standard ciphersuite naming convention
    and return the Mbed TLS ciphersuite naming convention.
    """

    # Replace "_" with "-"
    s_cipher = s_cipher.replace("_", "-")

    return s_cipher

def format_ciphersuite_names(mode, names):
    t = {"g": translate_gnutls,
         "o": translate_ossl,
         "m": translate_mbedtls
        }[mode]
    return " ".join(c + '=' + t(c) for c in names)

def main(target, names):
    print(format_ciphersuite_names(target, names))

if __name__ == "__main__":
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('target', metavar='TARGET', choices=['o', 'g', 'm'])
    PARSER.add_argument('names', metavar='NAMES', nargs='+')
    ARGS = PARSER.parse_args()
    main(ARGS.target, ARGS.names)
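
# Example invocation (illustrative): running
#     tests/scripts/translate_ciphers.py g TLS_RSA_WITH_AES_256_CBC_SHA
# prints "TLS_RSA_WITH_AES_256_CBC_SHA=+RSA:+AES-256-CBC:+SHA1".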
35
externals/mbedtls/tests/scripts/travis-log-failure.sh
vendored
Executable file
@@ -0,0 +1,35 @@
#!/bin/sh

# travis-log-failure.sh
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# List the server and client logs on failed ssl-opt.sh and compat.sh tests.
# This script is used to make the logs show up in the Travis test results.
#
# Some of the logs can be very long: usually a couple of megabytes, but it
# can be much more. For example, the client log of test 273 in ssl-opt.sh
# is more than 630 megabytes long.

if [ -d include/mbedtls ]; then :; else
    echo "$0: must be run from root" >&2
    exit 1
fi

FILES="o-srv-*.log o-cli-*.log c-srv-*.log c-cli-*.log o-pxy-*.log"
MAX_LOG_SIZE=1048576 # print at most the last 1 MiB of each log

for PATTERN in $FILES; do
    for LOG in $( ls tests/$PATTERN 2>/dev/null ); do
        echo
        echo "****** BEGIN file: $LOG ******"
        echo
        tail -c $MAX_LOG_SIZE $LOG
        echo "****** END file: $LOG ******"
        echo
        rm $LOG
    done
done