From 3c138194ef6684b4594407515f82d20aa29322a9 Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 14 Jan 2025 19:13:07 +0000
Subject: [PATCH 01/62] sanity-check: set shellcheck level to warning

This patch locks shellcheck severity level to "warning". All "info"
level messages will be ignored.

Signed-off-by: Milosz Wasilewski
---
 sanity-check.sh |  5 ++++-
 validate.py     | 11 ++++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/sanity-check.sh b/sanity-check.sh
index 1a2fa3e4a..9af667de6 100755
--- a/sanity-check.sh
+++ b/sanity-check.sh
@@ -4,7 +4,8 @@ set -ex
 python3 validate.py \
     -r build-error.txt \
     -p E501 W503 E203 \
-    -s SC1091 SC2230 SC3043
+    -s SC1091 SC2230 SC3043 \
+    -l warning
 
 # pycodestyle checks skipped:
 # E501: line too long
@@ -18,3 +19,5 @@ python3 validate.py \
 # Reason: 'which' is widely used and supported. And 'command' applets isn't
 # available in busybox, refer to https://busybox.net/downloads/BusyBox.html
 # SC2230: which is non-standard. Use builtin 'command -v' instead.
+
+# "warning" is the default severity level for shellcheck
diff --git a/validate.py b/validate.py
index bac8cbfea..aa44eb1af 100755
--- a/validate.py
+++ b/validate.py
@@ -221,7 +221,7 @@ def validate_yaml(filename, args):
     return 0
 
 
-def validate_shell(filename, ignore_options):
+def validate_shell(filename, args):
     ignore_string = ""
     if args.shellcheck_ignore is not None:
         # Exclude types of warnings in the following format:
@@ -229,6 +229,7 @@
         ignore_string = "-e %s" % ",".join(args.shellcheck_ignore)
         if len(ignore_string) < 4:  # contains only "-e "
             ignore_string = ""
+    ignore_string = "-S %s %s" % (args.shellcheck_level, ignore_string)
     cmd = "shellcheck %s" % ignore_string
     return validate_external(cmd, filename, "SHELLCHECK", args)
 
@@ -341,6 +342,14 @@ def main(args):
         help="Space separated list of shellcheck exclusions",
         dest="shellcheck_ignore",
     )
+    parser.add_argument(
+        "-l",
+        "--shellcheck-level",
+        default="warning",
+        help="Shellcheck level set with -S",
+        dest="shellcheck_level",
+    )
+
     parser.add_argument(
         "-g",
         "--git-latest",

From 881b65ae280b1710d1a880eaf9b4d58217fa3aed Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 14 Jan 2025 19:15:42 +0000
Subject: [PATCH 02/62] sanity-check: add more comments to the script

Explain why SC3043 is ignored in this repository

Signed-off-by: Milosz Wasilewski
---
 sanity-check.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sanity-check.sh b/sanity-check.sh
index 9af667de6..3f5c794d1 100755
--- a/sanity-check.sh
+++ b/sanity-check.sh
@@ -20,4 +20,7 @@ python3 validate.py \
 # available in busybox, refer to https://busybox.net/downloads/BusyBox.html
 # SC2230: which is non-standard. Use builtin 'command -v' instead.
 
+# SC3043: In POSIX 'local' is undefined but it's widely used. For this reason
+# it is acceptable to use 'local' in this repository
+
 # "warning" is the default severity level for shellcheck

From b52bb271bb5917e95a6937daafd9ffcf8dceb7e3 Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 14 Jan 2025 19:21:37 +0000
Subject: [PATCH 03/62] automated: android: disable SC2320 checks

Signed-off-by: Milosz Wasilewski
---
 automated/android/bootchart/device-script.sh | 2 ++
 .../linaro-android-userspace-tests.sh        | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/automated/android/bootchart/device-script.sh b/automated/android/bootchart/device-script.sh
index 902616ad5..db332a122 100755
--- a/automated/android/bootchart/device-script.sh
+++ b/automated/android/bootchart/device-script.sh
@@ -1,9 +1,11 @@
 #!/system/bin/sh
 # shellcheck disable=SC2181
+# shellcheck disable=SC2320
 #
 # script to start and stop bootchart test.
 #
 # Copyright (C) 2014, Linaro Limited.
+# Copyright (C) 2025, Qualcomm Inc.
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
diff --git a/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh b/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh
index 640935dd0..0f9902a56 100755
--- a/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh
+++ b/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh
@@ -4,6 +4,7 @@
 # shellcheck disable=SC2181
 # shellcheck disable=SC2155
 # shellcheck disable=SC2166
+# shellcheck disable=SC2320
 # shellcheck disable=SC3006
 # shellcheck disable=SC3010
 # shellcheck disable=SC3018
@@ -12,6 +13,7 @@
 # shellcheck disable=SC3060
 #############################################################################
 # Copyright (c) 2014 Linaro
+# Copyright (c) 2025 Qualcomm Inc
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Eclipse Public License v1.0
 # which accompanies this distribution, and is available at
@@ -19,6 +21,7 @@
 #
 # Contributors:
 #     Linaro
+#     Milosz Wasilewski
 #############################################################################
 
 # Individual and batch test definitions

From 649fbe3938e0fc800df3dfd734dea3d6f76c969c Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Fri, 10 Jan 2025 10:49:38 +0000
Subject: [PATCH 04/62] automated: linux: update bootrr definition

The bootrr official upstream repository moved to
https://github.com/linux-msm/bootrr. On top of that, bootrr now
executes tests based on the DTB. It is still possible to override the
detected DTB, but it is not the recommended way of running the test.
On systems without a package manager, the bootrr package should be
preinstalled before running the test.

Signed-off-by: Milosz Wasilewski
---
 automated/linux/bootrr/bootrr.sh   | 48 ++++++++++++++++++++++++++++++
 automated/linux/bootrr/bootrr.yaml | 21 +++++++------
 2 files changed, 58 insertions(+), 11 deletions(-)
 create mode 100755 automated/linux/bootrr/bootrr.sh

diff --git a/automated/linux/bootrr/bootrr.sh b/automated/linux/bootrr/bootrr.sh
new file mode 100755
index 000000000..bd03093d0
--- /dev/null
+++ b/automated/linux/bootrr/bootrr.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2025 Qualcomm Inc.
+
+# shellcheck disable=SC1091
+. ../../lib/sh-test-lib
+BOARD=""
+REPOSITORY="https://github.com/linux-msm/bootrr"
+SKIP_INSTALL="true"
+
+usage() {
+    echo "Usage: $0 [-b <board>] [-r <repository>] [-s <true|false>]" 1>&2
+    exit 1
+}
+
+while getopts "b:r:s:" o; do
+    case "$o" in
+        b) BOARD="${OPTARG}" ;;
+        r) REPOSITORY="${OPTARG}" ;;
+        s) SKIP_INSTALL="${OPTARG}" ;;
+        *) usage ;;
+    esac
+done
+
+install() {
+    install_deps git "${SKIP_INSTALL}"
+    git clone "${REPOSITORY}" bootrr
+    cd bootrr || error_msg "bootrr cloning failed"
+    make DESTDIR=/ install
+}
+
+! check_root && error_msg "This script must be run as root"
+
+if [ "${SKIP_INSTALL}" = "false" ] || [ "${SKIP_INSTALL}" = "False" ] || [ "${SKIP_INSTALL}" = "FALSE" ]; then
+    install
+fi
+
+if [ -z "${BOARD}" ]; then
+    # bootrr tests are executed based on DTB
+    bootrr
+else
+    # run tests for board that might not be compatible
+    BOOTRR_DIR="/usr/libexec/bootrr"
+    PATH="${BOOTRR_DIR}/helpers:${PATH}"
+    if [ -x "${BOOTRR_DIR}/boards/${BOARD}" ]; then
+        ${BOOTRR_DIR}/boards/"${BOARD}"
+    fi
+fi
diff --git a/automated/linux/bootrr/bootrr.yaml b/automated/linux/bootrr/bootrr.yaml
index 084a0e3e4..c019955eb 100644
--- a/automated/linux/bootrr/bootrr.yaml
+++ b/automated/linux/bootrr/bootrr.yaml
@@ -1,9 +1,9 @@
 metadata:
     format: Lava-Test Test Definition 1.0
     name: bootrr
-    description: "Run bootrr https://github.com/andersson/bootrr"
+    description: "Run bootrr https://github.com/linux-msm/bootrr"
     maintainer:
-        - milosz.wasilewski@linaro.org
+        - milosz.wasilewski@oss.qualcomm.com
         - chase.qi@linaro.org
     os:
         - debian
@@ -17,17 +17,16 @@ metadata:
         - apq8016-sbc
         - dragonboard-410c
         - dragonboard-820c
+        - qcs6490-rb3gen2
 
 params:
-    # Refer to https://github.com/andersson/bootrr/tree/master/boards for boards supported.
-    BOARD: "arrow,apq8096-db820c"
-    SKIP_INSTALL: false
+    # Refer to https://github.com/linux-msm/bootrr/tree/master/boards for boards supported.
+    BOARD: ""
+    REPOSITORY: "https://github.com/linux-msm/bootrr"
+    SKIP_INSTALL: true
 
 run:
     steps:
-        - . ./automated/lib/sh-test-lib
-        - install_deps git "${SKIP_INSTALL}"
-        - git clone https://github.com/andersson/bootrr
-        - cd bootrr
-        - export PATH=$PWD/helpers:$PATH
-        - ./boards/${BOARD}
+        - cd automated/linux/bootrr
+        # bootrr produces LAVA friendly output
+        - ./bootrr.sh -r "${REPOSITORY}" -s "${SKIP_INSTALL}" -b "${BOARD}"

From f4e34695c37fd2ff23718716263e3ed63d211099 Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 14 Jan 2025 17:07:07 +0000
Subject: [PATCH 05/62] automated: linux: add alsa-smoke test

The test uses the alsa-utils tools aplay and arecord to find whether
any playback and record devices are available in the system. If the
test runs on an OS without a package manager and no alsa-utils
available, it will fall back to looking for character devices named
"controlCX" where X is a number. This is the same approach that
alsa-utils uses. The default path to look for devices is /dev/snd/.

Signed-off-by: Milosz Wasilewski
---
 automated/linux/alsa-smoke/alsa-smoke.sh   | 65 ++++++++++++++++++++++
 automated/linux/alsa-smoke/alsa-smoke.yaml | 28 ++++++++++
 2 files changed, 93 insertions(+)
 create mode 100755 automated/linux/alsa-smoke/alsa-smoke.sh
 create mode 100644 automated/linux/alsa-smoke/alsa-smoke.yaml

diff --git a/automated/linux/alsa-smoke/alsa-smoke.sh b/automated/linux/alsa-smoke/alsa-smoke.sh
new file mode 100755
index 000000000..e6dfdaca0
--- /dev/null
+++ b/automated/linux/alsa-smoke/alsa-smoke.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+# shellcheck disable=SC1091
+. ../../lib/sh-test-lib
+OUTPUT="$(pwd)/output"
+RESULT_FILE="${OUTPUT}/result.txt"
+export RESULT_FILE
+REFERENCE_PATH="/dev/snd"
+SKIP_INSTALL="False"
+
+usage() {
+    echo "Usage: $0 [-s <true|false>] [-p <path>]" 1>&2
+    exit 1
+}
+
+while getopts "s:p:h" o; do
+    case "$o" in
+        s) SKIP_INSTALL="${OPTARG}" ;;
+        p) REFERENCE_PATH="${OPTARG}" ;;
+        h|*) usage ;;
+    esac
+done
+
+install() {
+    dist_name
+    # shellcheck disable=SC2154
+    case "${dist}" in
+        debian|ubuntu) install_deps "alsa-utils" "${SKIP_INSTALL}";;
+        fedora|centos) install_deps "alsa-utils" "${SKIP_INSTALL}";;
+        unknown) warn_msg "Unsupported distro: package install skipped" ;;
+    esac
+}
+
+run() {
+    # shellcheck disable=SC3043
+    local test_command="$1"
+    # shellcheck disable=SC3043
+    local test_name="$2"
+    # shellcheck disable=SC2086
+    if command -v ${test_command}; then
+        # shellcheck disable=SC2086
+        if ${test_command} -l | grep "card [0-9]"; then
+            report_pass "${test_name}_devices"
+        else
+            report_fail "${test_name}_devices"
+        fi
+    else
+        # shellcheck disable=SC2086
+        # shellcheck disable=SC2012
+        DEVICES=$(find ${REFERENCE_PATH} -type c -name "controlC*" | wc -l)
+        if [ "${DEVICES}" -gt 0 ]; then
+            report_pass "${test_name}_devices"
+        else
+            report_fail "${test_name}_devices"
+        fi
+    fi
+}
+
+# Test run.
+create_out_dir "${OUTPUT}"
+
+install
+
+run aplay playback
+run arecord record
diff --git a/automated/linux/alsa-smoke/alsa-smoke.yaml b/automated/linux/alsa-smoke/alsa-smoke.yaml
new file mode 100644
index 000000000..0fcb4775b
--- /dev/null
+++ b/automated/linux/alsa-smoke/alsa-smoke.yaml
@@ -0,0 +1,28 @@
+metadata:
+    format: Lava-Test Test Definition 1.0
+    name: alsa-smoke
+    description: "Run alsa smoke tests. The test checks if
+                  there are playback and record devices available.
+                  No actual playback or record tests are performed."
+    maintainer:
+        - milosz.wasilewski@oss.qualcomm.com
+    os:
+        - debian
+        - ubuntu
+        - centos
+        - fedora
+        - openembedded
+    scope:
+        - functional
+    devices:
+        - imx8mm-lpddr4-evk
+
+params:
+    SKIP_INSTALL: "False"
+    REFERENCE_PATH: "/dev/snd"
+
+run:
+    steps:
+        - cd ./automated/linux/alsa-smoke/
+        - ./alsa-smoke.sh -s "${SKIP_INSTALL}" -p "${REFERENCE_PATH}"
+        - ../../utils/send-to-lava.sh ./output/result.txt

From 41161a02a341a1cd63540c76a125cc2d0a4693e8 Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 11 Feb 2025 11:56:29 +0000
Subject: [PATCH 06/62] automated: utils: remove distutils from test-runner

The distutils package was removed from Python 3.12. This patch removes
distutils from test-runner and replaces it with shutil.
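For illustration only (not part of the change), the substitution is a
one-to-one swap, sketched here with the executable name that
test-runner actually probes for:

    import shutil

    # Removed in Python 3.12:
    #   from distutils.spawn import find_executable
    #   find_executable("lava-test-case")
    # Standard-library replacement (since Python 3.3),
    # returns the full path or None:
    shutil.which("lava-test-case")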
Signed-off-by: Milosz Wasilewski
---
 automated/utils/test-runner.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/automated/utils/test-runner.py b/automated/utils/test-runner.py
index d49bef623..751833201 100755
--- a/automated/utils/test-runner.py
+++ b/automated/utils/test-runner.py
@@ -16,7 +16,6 @@ import time
 
 from uuid import uuid4
 from datetime import datetime
-from distutils.spawn import find_executable
 
 
 try:
@@ -844,7 +843,7 @@ def __init__(self, test, args):
             self.results["version"] = test_version.rstrip()
             os.chdir(path)
         self.lava_run = args.lava_run
-        if self.lava_run and not find_executable("lava-test-case"):
+        if self.lava_run and not shutil.which("lava-test-case"):
             self.logger.info(
                 "lava-test-case not found, '-l' or '--lava_run' option ignored'"
             )

From 9e068e0baca9c2664f3f591078181e9f68e38767 Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 11 Feb 2025 12:09:11 +0000
Subject: [PATCH 07/62] automated: lib: fix black warning

Signed-off-by: Milosz Wasilewski
---
 automated/lib/py_util_lib.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/automated/lib/py_util_lib.py b/automated/lib/py_util_lib.py
index a3da5ebfb..e695017b0 100644
--- a/automated/lib/py_util_lib.py
+++ b/automated/lib/py_util_lib.py
@@ -1,5 +1,4 @@
-"""Shared Python 3 utility code.
-"""
+"""Shared Python 3 utility code."""
 
 from pathlib import Path
 import subprocess

From aa509b937189a6df80033fa36c9174ad88799fef Mon Sep 17 00:00:00 2001
From: Yongqin Liu
Date: Mon, 13 Jan 2025 20:00:14 +0800
Subject: [PATCH 08/62] upload-to-squad.sh: use "Authorization" in header for
 uploading

instead of the "Auth-Token" way as suggested here:
    https://squad.readthedocs.io/en/latest/intro.html#submitting-results

Reported-by: Benjamin Copeland
Signed-off-by: Yongqin Liu
---
 automated/utils/upload-to-squad.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/utils/upload-to-squad.sh b/automated/utils/upload-to-squad.sh
index 4f3ad8d4b..9dc8c9e03 100755
--- a/automated/utils/upload-to-squad.sh
+++ b/automated/utils/upload-to-squad.sh
@@ -63,7 +63,7 @@ if command -v lava-test-reference > /dev/null 2>&1; then
     i=1
     while [ $i -le "${RETRY_COUNT}" ]; do
         # response is the squad testrun id when succeed
-        response=$(curl ${CURL_VERBOSE_FLAG} --header "Auth-Token: ${SQUAD_ARCHIVE_SUBMIT_TOKEN}" --form "attachment=@${ATTACHMENT}" "${ARTIFACTORIAL_URL}")
+        response=$(curl ${CURL_VERBOSE_FLAG} --header "Authorization: token ${SQUAD_ARCHIVE_SUBMIT_TOKEN}" --form "attachment=@${ATTACHMENT}" "${ARTIFACTORIAL_URL}")
 
         # generate the SQUAD url for download and report pass when uploading succeed
         if echo "${response}" | grep -E "^[0-9]+$"; then

From 48fa9013fe31d00c161bad3c36187ef4570f2617 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Thu, 27 Feb 2025 14:32:28 +0100
Subject: [PATCH 09/62] automated: linux: ltp: remove kirk's report.json after
 parsing it

When running multiple ltp suites without rebooting, the following
error is shown when running kirk the second time:
'kirk: error: JSON report file already exists: /tmp/kirk-report.json'

Remove the kirk-report.json file after parsing the file.
Signed-off-by: Anders Roxell
---
 automated/linux/ltp/ltp.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index d1b03dc05..63c99e18b 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -193,6 +193,7 @@ run_ltp() {
             --json-report /tmp/kirk-report.json \
             --verbose" "tee ${OUTPUT}/LTP_${LOG_FILE}.out"
         parse_ltp_json_results "/tmp/kirk-report.json"
+        rm "/tmp/kirk-report.json"
     else
         pipe0_status "./runltp -p -q -f shardfile \
             -l ${OUTPUT}/LTP_${LOG_FILE}.log \

From a979f01c2f45668a1febbab3b0d7494ae6cdf4b5 Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Fri, 7 Mar 2025 13:25:14 -0300
Subject: [PATCH 10/62] automated: linux: ltp: fix typo

Signed-off-by: Antonio Terceiro
---
 automated/linux/ltp/ltp.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/linux/ltp/ltp.yaml b/automated/linux/ltp/ltp.yaml
index e6c1b2a01..8ec8ce9b0 100644
--- a/automated/linux/ltp/ltp.yaml
+++ b/automated/linux/ltp/ltp.yaml
@@ -41,7 +41,7 @@ params:
 
     # New kirk runner (https://github.com/linux-test-project/kirk.git)
     # Needs to be installed onto the rootfs.
-    # Set RUNNER to full path to kik or to kirk if its in the PATH.
+    # Set RUNNER to full path to kirk or to kirk if its in the PATH.
     RUNNER: ""
 
     # If the following parameter is set, then the LTP suite is

From a1312792895c9a65cf947095c74b6e7750d11fef Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Fri, 7 Mar 2025 15:00:15 -0300
Subject: [PATCH 11/62] automated: linux: ltp: allow not passing a skipfile to
 kirk

Signed-off-by: Antonio Terceiro
---
 automated/linux/ltp/ltp.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index 63c99e18b..e3eb725ac 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -189,7 +189,7 @@ run_ltp() {
     fi
     pipe0_status "${RUNNER} --framework ltp --run-suite shardfile \
         -d ${LTP_TMPDIR} --env LTP_COLORIZE_OUTPUT=0 \
-        --skip-file ${SKIPFILE_PATH} \
+        ${SKIPFILE_PATH:+--skip-file} ${SKIPFILE_PATH} \
         --json-report /tmp/kirk-report.json \
         --verbose" "tee ${OUTPUT}/LTP_${LOG_FILE}.out"
     parse_ltp_json_results "/tmp/kirk-report.json"

From 45a4d4add5d60b73b4a23e6f011daeb92445b997 Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Mon, 10 Mar 2025 16:41:00 -0300
Subject: [PATCH 12/62] send-to-lava: add support for publishing log files

For each test, send-to-lava will look into a results directory for
files named ${test}.log. If that file is found, then its contents will
be pushed to lava as the log snippet corresponding to that test.

The result directory can be passed as the second command line argument.
If omitted, the same directory as the results file will be used.

Signed-off-by: Antonio Terceiro
---
 automated/utils/send-to-lava.sh | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/automated/utils/send-to-lava.sh b/automated/utils/send-to-lava.sh
index 36d678e62..d80723791 100755
--- a/automated/utils/send-to-lava.sh
+++ b/automated/utils/send-to-lava.sh
@@ -1,6 +1,22 @@
 #!/bin/sh
 
 RESULT_FILE="$1"
+RESULT_DIR="$2"
+if [ -z "${RESULT_DIR}" ]; then
+    RESULT_DIR="$(dirname "${RESULT_FILE}")"
+fi
+
+show_output() {
+    test_name="$1"
+    test_output="${RESULT_DIR}/${test_name}.log"
+    if [ -r "$test_output" ]; then
+        echo "<LAVA_SIGNAL_STARTTC ${test_name}>"
+        cat "$test_output"
+        echo "<LAVA_SIGNAL_ENDTC ${test_name}>"
+    else
+        echo "WARNING: no log file found for ${test_name} at ${test_output}" >&2
+    fi
+}
 
 command -v lava-test-case > /dev/null 2>&1
 lava_test_case="$?"
@@ -13,6 +29,7 @@
         test="${line%% *}"
         result="${line##* }"
 
+        show_output "${test}"
         if [ "${lava_test_case}" -eq 0 ]; then
             lava-test-case "${test}" --result "${result}"
         else
@@ -24,6 +41,7 @@
             measurement="$(echo "${line}" | awk '{print $3}')"
             units="$(echo "${line}" | awk '{print $4}')"
 
+            show_output "${test}"
             if [ "${lava_test_case}" -eq 0 ]; then
                 if [ -n "${units}" ]; then
                     lava-test-case "${test}" --result "${result}" --measurement "${measurement}" --units "${units}"

From aac60d98ed35b6630f6076aff2c747a1f4576566 Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Mon, 10 Mar 2025 16:44:48 -0300
Subject: [PATCH 13/62] automated: linux: ltp: publish log files for kirk runs

kirk produces a JSON results file with the logs already collected,
which is very convenient.

Signed-off-by: Antonio Terceiro
---
 automated/linux/ltp/ltp.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index e3eb725ac..3becdd4fd 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -163,6 +163,9 @@ parse_ltp_output() {
 parse_ltp_json_results() {
     jq -r '.results| .[]| "\(.test_fqn) \(.test.result)"' "$1" \
         | sed 's/brok/fail/; s/conf/skip/' >> "${RESULT_FILE}"
+    for test_fqn in $(jq -r '.results| .[]| .test_fqn' "$1"); do
+        jq -r '.results | .[] | select(.test_fqn == "'"${test_fqn}"'") | .test.log' "$1" > ${OUTPUT}/${test_fqn}.log
+    done
 }
 
 # Run LTP test suite

From 3462ad8306fd59d6b5d31114a2317f8ab64fadd1 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Sun, 16 Mar 2025 03:30:16 +0100
Subject: [PATCH 14/62] automated: linux: pmqtest: fix indentation

Signed-off-by: Anders Roxell
---
 automated/linux/pmqtest/pmqtest.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/linux/pmqtest/pmqtest.sh b/automated/linux/pmqtest/pmqtest.sh
index 57ecfba5a..d06bbb9ec 100755
--- a/automated/linux/pmqtest/pmqtest.sh
+++ b/automated/linux/pmqtest/pmqtest.sh
@@ -19,7 +19,7 @@ usage() {
 while getopts ":D:w:" opt; do
     case "${opt}" in
         D) DURATION="${OPTARG}" ;;
-            w) BACKGROUND_CMD="${OPTARG}" ;;
+        w) BACKGROUND_CMD="${OPTARG}" ;;
         *) usage ;;
     esac
 done

From b6f14bbd52a7bf47cfb175ff2d7058fa9c69b363 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Sun, 16 Mar 2025 03:33:00 +0100
Subject: [PATCH 15/62] automated: linux: pi-stress: fix indentation

Signed-off-by: Anders Roxell
---
 automated/linux/pi-stress/pi-stress.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh
index 6ad0d179f..938e3e691 100755
--- a/automated/linux/pi-stress/pi-stress.sh
+++ b/automated/linux/pi-stress/pi-stress.sh
@@ -26,7 +26,7 @@ while getopts ":D:m:r:w" opt; do
         D) DURATION="${OPTARG}" ;;
         m) MLOCKALL="${OPTARG}" ;;
         r) RR="${OPTARG}" ;;
-            w) BACKGROUND_CMD="${OPTARG}" ;;
+        w) BACKGROUND_CMD="${OPTARG}" ;;
         *) usage ;;
     esac
 done

From 5333a7557b2ae200bd142ca13463fadb6230346f Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Sun, 16 Mar 2025 03:35:49 +0100
Subject: [PATCH 16/62] automated: linux: signaltest: fix indentation

Signed-off-by: Anders Roxell
---
 automated/linux/signaltest/signaltest.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/automated/linux/signaltest/signaltest.sh b/automated/linux/signaltest/signaltest.sh
index e7db4f3a2..fc044b08d 100755
--- a/automated/linux/signaltest/signaltest.sh
+++ b/automated/linux/signaltest/signaltest.sh
@@ -22,8 +22,8 @@ while getopts ":p:t:D:w:" opt; do
     case "${opt}" in
         p) PRIORITY="${OPTARG}" ;;
         t) THREADS="${OPTARG}" ;;
-            D) DURATION="${OPTARG}" ;;
-            w) BACKGROUND_CMD="${OPTARG}" ;;
+        D) DURATION="${OPTARG}" ;;
+        w) BACKGROUND_CMD="${OPTARG}" ;;
         *) usage ;;
     esac
 done

From 845a37b6d7ca08f29297d8bd7a0e322da033940a Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Sun, 16 Mar 2025 03:38:55 +0100
Subject: [PATCH 17/62] automated: linux: rt-migrate-test: fix indentation

Signed-off-by: Anders Roxell
---
 automated/linux/rt-migrate-test/rt-migrate-test.sh | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.sh b/automated/linux/rt-migrate-test/rt-migrate-test.sh
index a11eda147..d53e8ca9b 100755
--- a/automated/linux/rt-migrate-test/rt-migrate-test.sh
+++ b/automated/linux/rt-migrate-test/rt-migrate-test.sh
@@ -18,9 +18,9 @@ usage() {
 
 while getopts ":l:p:D:w:" opt; do
     case "${opt}" in
-            p) PRIORITY="${OPTARG}" ;;
-            D) DURATION="${OPTARG}" ;;
-            w) BACKGROUND_CMD="${OPTARG}" ;;
+        p) PRIORITY="${OPTARG}" ;;
+        D) DURATION="${OPTARG}" ;;
+        w) BACKGROUND_CMD="${OPTARG}" ;;
         *) usage ;;
     esac
 done

From 0d993fa750557fdbb2d448d600cda5918e19f29c Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Sun, 16 Mar 2025 03:42:44 +0100
Subject: [PATCH 18/62] automated: linux: cyclicdeadline: fix indentation

Signed-off-by: Anders Roxell
---
 automated/linux/cyclicdeadline/cyclicdeadline.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh
index 20e584b46..c98db304b 100755
--- a/automated/linux/cyclicdeadline/cyclicdeadline.sh
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh
@@ -27,7 +27,7 @@ while getopts ":i:s:t:D:w:" opt; do
         s) STEP="${OPTARG}" ;;
         t) THREADS="${OPTARG}" ;;
         D) DURATION="${OPTARG}" ;;
-            w) BACKGROUND_CMD="${OPTARG}" ;;
+        w) BACKGROUND_CMD="${OPTARG}" ;;
         *) usage ;;
     esac
 done

From 9ebdd88475bb215a89a0819a9c413d29937efc58 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Fri, 21 Mar 2025 15:24:14 +0100
Subject: [PATCH 19/62] automated: linux: pmqtest: add iterations

This will make it possible to run multiple iterations to gather
multiple min/max/avg values.

Signed-off-by: Anders Roxell
---
 automated/linux/pmqtest/pmqtest.sh   | 22 ++++++++++++++++------
 automated/linux/pmqtest/pmqtest.yaml |  3 ++-
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/automated/linux/pmqtest/pmqtest.sh b/automated/linux/pmqtest/pmqtest.sh
index d06bbb9ec..fc7068afd 100755
--- a/automated/linux/pmqtest/pmqtest.sh
+++ b/automated/linux/pmqtest/pmqtest.sh
@@ -6,20 +6,22 @@
 
 TEST_DIR=$(dirname "$(realpath "$0")")
 OUTPUT="${TEST_DIR}/output"
-LOGFILE="${OUTPUT}/pmqtest.json"
+LOGFILE="${OUTPUT}/pmqtest"
 RESULT_FILE="${OUTPUT}/result.txt"
 
 DURATION="5m"
 BACKGROUND_CMD=""
+ITERATIONS=1
 
 usage() {
-    echo "Usage: $0 [-D duration] [-w background_cmd]" 1>&2
+    echo "Usage: $0 [-D duration] [-w background_cmd] [-i iterations]" 1>&2
     exit 1
 }
 
-while getopts ":D:w:" opt; do
+while getopts ":D:w:i:" opt; do
     case "${opt}" in
         D) DURATION="${OPTARG}" ;;
         w) BACKGROUND_CMD="${OPTARG}" ;;
+        i) ITERATIONS="${OPTARG}" ;;
         *) usage ;;
     esac
 done
@@ -38,10 +40,18 @@
 
 background_process_start bgcmd --cmd "${BACKGROUND_CMD}"
 
-"${binary}" -q -S -p 98 -D "${DURATION}" --json="${LOGFILE}"
+for i in $(seq ${ITERATIONS}); do
+    "${binary}" -q -S -p 98 -D "${DURATION}" --json="${LOGFILE}-${i}.json"
+done
 
 background_process_stop bgcmd
 
 # Parse test log.
-../../lib/parse_rt_tests_results.py pmqtest "${LOGFILE}" \
-    | tee -a "${RESULT_FILE}"
+for i in $(seq ${ITERATIONS}); do
+    ../../lib/parse_rt_tests_results.py pmqtest "${LOGFILE}-${i}.json" \
+        | tee "${RESULT_FILE}"
+
+    if [ ${ITERATIONS} -ne 1 ]; then
+        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+    fi
+done
diff --git a/automated/linux/pmqtest/pmqtest.yaml b/automated/linux/pmqtest/pmqtest.yaml
index b23b5437f..0987152ed 100644
--- a/automated/linux/pmqtest/pmqtest.yaml
+++ b/automated/linux/pmqtest/pmqtest.yaml
@@ -35,10 +35,11 @@ params:
     # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html
     ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/"
     ARTIFACTORIAL_TOKEN: ""
+    ITERATIONS: 1
 
 run:
     steps:
         - cd ./automated/linux/pmqtest/
-        - ./pmqtest.sh -D "${DURATION}" -w "${BACKGROUND_CMD}"
+        - ./pmqtest.sh -D "${DURATION}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}"
        - ../../utils/upload-to-artifactorial.sh -a "output/pmqtest.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}"
         - ../../utils/send-to-lava.sh ./output/result.txt

From c6056a11448f4656009890d8ca318bd691c06aed Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Fri, 21 Mar 2025 15:22:01 +0100
Subject: [PATCH 20/62] automated: lib: parse_rt_tests_results: pi-stress: fix
 output

Signed-off-by: Anders Roxell
---
 automated/lib/parse_rt_tests_results.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/automated/lib/parse_rt_tests_results.py b/automated/lib/parse_rt_tests_results.py
index 9cc0f1e4f..5c966c590 100755
--- a/automated/lib/parse_rt_tests_results.py
+++ b/automated/lib/parse_rt_tests_results.py
@@ -69,9 +69,9 @@ def parse_json(testname, filename):
         # rlta timertat also knows about irqs
         parse_irqs(rawdata)
 
-    elif "inversions" in rawdata:
+    elif "inversion" in rawdata:
         # pi_stress
-        print("inversion {}\n".format(rawdata("inversions")))
+        print("inversion pass {} count\n".format(rawdata["inversion"]))
 
     if int(rawdata["return_code"]) == 0:
         print("{} pass".format(testname))

From 970fdbc85abdc7d649d6f5cc8c1503e05ee42d57 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Fri, 21 Feb 2025 19:52:41 +0100
Subject: [PATCH 21/62] automated: linux: pi-stress: add iterations

This will make it possible to run multiple iterations to gather
multiple min/max/avg values.

Signed-off-by: Anders Roxell
---
 automated/linux/pi-stress/pi-stress.sh   | 22 ++++++++++++++++------
 automated/linux/pi-stress/pi-stress.yaml |  3 ++-
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh
index 938e3e691..a030e4c2b 100755
--- a/automated/linux/pi-stress/pi-stress.sh
+++ b/automated/linux/pi-stress/pi-stress.sh
@@ -7,7 +7,7 @@
 . ../../lib/sh-test-lib
 OUTPUT="$(pwd)/output"
 
-LOGFILE="${OUTPUT}/pi-stress.json"
+LOGFILE="${OUTPUT}/pi-stress"
 RESULT_FILE="${OUTPUT}/result.txt"
 export RESULT_FILE
 
@@ -15,18 +15,20 @@
 DURATION="5m"
 MLOCKALL="false"
 RR="false"
 BACKGROUND_CMD=""
+ITERATIONS=1
 
 usage() {
-    echo "Usage: $0 [-D runtime] [-m <true|false>] [-r <true|false>] [-w background_cmd]" 1>&2
+    echo "Usage: $0 [-D runtime] [-m <true|false>] [-r <true|false>] [-w background_cmd] [-i iterations]" 1>&2
     exit 1
 }
 
-while getopts ":D:m:r:w" opt; do
+while getopts ":D:m:r:w:i:" opt; do
     case "${opt}" in
         D) DURATION="${OPTARG}" ;;
         m) MLOCKALL="${OPTARG}" ;;
         r) RR="${OPTARG}" ;;
         w) BACKGROUND_CMD="${OPTARG}" ;;
+        i) ITERATIONS="${OPTARG}" ;;
         *) usage ;;
     esac
 done
@@ -57,10 +59,18 @@
 background_process_start bgcmd --cmd "${BACKGROUND_CMD}"
 
 # test script. Catch and ignore it with trap.
 trap '' TERM
 # shellcheck disable=SC2086
-"${binary}" -q --duration "${DURATION}" ${MLOCKALL} ${RR} --json="${LOGFILE}"
+for i in $(seq ${ITERATIONS}); do
+    "${binary}" -q --duration "${DURATION}" ${MLOCKALL} ${RR} --json="${LOGFILE}-${i}.json"
+done
 
 background_process_stop bgcmd
 
 # Parse test log.
-../../lib/parse_rt_tests_results.py pi-stress "${LOGFILE}" \
-    | tee -a "${RESULT_FILE}"
+for i in $(seq ${ITERATIONS}); do
+    ../../lib/parse_rt_tests_results.py pi-stress "${LOGFILE}-${i}.json" \
+        | tee "${RESULT_FILE}"
+
+    if [ ${ITERATIONS} -ne 1 ]; then
+        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+    fi
+done
diff --git a/automated/linux/pi-stress/pi-stress.yaml b/automated/linux/pi-stress/pi-stress.yaml
index 64a5b784f..2c553cae2 100644
--- a/automated/linux/pi-stress/pi-stress.yaml
+++ b/automated/linux/pi-stress/pi-stress.yaml
@@ -40,10 +40,11 @@ params:
     # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html
     ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/"
     ARTIFACTORIAL_TOKEN: ""
+    ITERATIONS: 1
 
 run:
     steps:
         - cd automated/linux/pi-stress
-        - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -w "${BACKGROUND_CMD}"
+        - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}"
         - ../../utils/upload-to-artifactorial.sh -a "output/pi-stress.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}"
         - ../../utils/send-to-lava.sh ./output/result.txt

From 2c35bc0d1494a6ff5c117ad2b80608768b63b3b4 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Thu, 27 Feb 2025 20:17:45 +0100
Subject: [PATCH 22/62] automated: linux: signaltest: add iterations

This will make it possible to run multiple iterations to gather
multiple min/max/avg values.

Signed-off-by: Anders Roxell
---
 automated/linux/signaltest/signaltest.sh   | 22 ++++++++++++++++------
 automated/linux/signaltest/signaltest.yaml |  3 ++-
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/automated/linux/signaltest/signaltest.sh b/automated/linux/signaltest/signaltest.sh
index fc044b08d..9069134ac 100755
--- a/automated/linux/signaltest/signaltest.sh
+++ b/automated/linux/signaltest/signaltest.sh
@@ -5,25 +5,27 @@
 . ../../lib/sh-test-lib
 OUTPUT="$(pwd)/output"
 
-LOGFILE="${OUTPUT}/signaltest.json"
+LOGFILE="${OUTPUT}/signaltest"
 RESULT_FILE="${OUTPUT}/result.txt"
 
 PRIORITY="98"
 THREADS="2"
 DURATION="1m"
 BACKGROUND_CMD=""
+ITERATIONS=1
 
 usage() {
-    echo "Usage: $0 [-r runtime] [-p priority] [-t threads] [-w background_cmd]" 1>&2
+    echo "Usage: $0 [-r runtime] [-p priority] [-t threads] [-w background_cmd] [-i iterations]" 1>&2
     exit 1
 }
 
-while getopts ":p:t:D:w:" opt; do
+while getopts ":p:t:D:w:i:" opt; do
     case "${opt}" in
         p) PRIORITY="${OPTARG}" ;;
         t) THREADS="${OPTARG}" ;;
         D) DURATION="${OPTARG}" ;;
         w) BACKGROUND_CMD="${OPTARG}" ;;
+        i) ITERATIONS="${OPTARG}" ;;
         *) usage ;;
     esac
 done
@@ -44,10 +46,18 @@
 
 background_process_start bgcmd --cmd "${BACKGROUND_CMD}"
 
-"${binary}" -q -D "${DURATION}" -a -m -p "${PRIORITY}" -t "${THREADS}" --json="${LOGFILE}"
+for i in $(seq ${ITERATIONS}); do
+    "${binary}" -q -D "${DURATION}" -a -m -p "${PRIORITY}" -t "${THREADS}" --json="${LOGFILE}-${i}.json"
+done
 
 background_process_stop bgcmd
 
 # Parse test log.
-../../lib/parse_rt_tests_results.py signaltest "${LOGFILE}" \
-    | tee -a "${RESULT_FILE}"
+for i in $(seq ${ITERATIONS}); do
+    ../../lib/parse_rt_tests_results.py signaltest "${LOGFILE}-${i}.json" \
+        | tee "${RESULT_FILE}"
+
+    if [ ${ITERATIONS} -ne 1 ]; then
+        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+    fi
+done
diff --git a/automated/linux/signaltest/signaltest.yaml b/automated/linux/signaltest/signaltest.yaml
index 98bc808d2..6307abcfb 100644
--- a/automated/linux/signaltest/signaltest.yaml
+++ b/automated/linux/signaltest/signaltest.yaml
@@ -41,10 +41,11 @@ params:
     # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html
     ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/"
     ARTIFACTORIAL_TOKEN: ""
+    ITERATIONS: 1
 
 run:
     steps:
         - cd ./automated/linux/signaltest
-        - ./signaltest.sh -D "${DURATION}" -p "${PRIORITY}" -t "${THREADS}" -w "${BACKGROUND_CMD}"
+        - ./signaltest.sh -D "${DURATION}" -p "${PRIORITY}" -t "${THREADS}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}"
         - ../../utils/upload-to-artifactorial.sh -a "output/signaltest.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}"
         - ../../utils/send-to-lava.sh ./output/result.txt

From 8ed13f7e166328925187077641efacffe89f3513 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Fri, 28 Feb 2025 22:39:34 +0100
Subject: [PATCH 23/62] automated: linux: rt-migrate-test: add iterations

This will make it possible to run multiple iterations to gather
multiple min/max/avg values.

Signed-off-by: Anders Roxell
---
 .../linux/rt-migrate-test/rt-migrate-test.sh  | 23 ++++++++++++++-----
 .../rt-migrate-test/rt-migrate-test.yaml      |  3 ++-
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.sh b/automated/linux/rt-migrate-test/rt-migrate-test.sh
index a11eda147..5ab04c497 100755
--- a/automated/linux/rt-migrate-test/rt-migrate-test.sh
+++ b/automated/linux/rt-migrate-test/rt-migrate-test.sh
@@ -5,22 +5,24 @@
 . ../../lib/sh-test-lib
 OUTPUT="$(pwd)/output"
 
-LOGFILE="${OUTPUT}/rt-migrate-test.json"
+LOGFILE="${OUTPUT}/rt-migrate-test"
 RESULT_FILE="${OUTPUT}/result.txt"
 
 PRIORITY="51"
 DURATION="1m"
 BACKGROUND_CMD=""
+ITERATIONS=1
 
 usage() {
-    echo "Usage: $0 [-p priority] [-D duration] [-w background_cmd]" 1>&2
+    echo "Usage: $0 [-p priority] [-D duration] [-w background_cmd] [-i iterations]" 1>&2
     exit 1
 }
 
-while getopts ":l:p:D:w:" opt; do
+while getopts ":l:p:D:w:i:" opt; do
     case "${opt}" in
         p) PRIORITY="${OPTARG}" ;;
         D) DURATION="${OPTARG}" ;;
         w) BACKGROUND_CMD="${OPTARG}" ;;
+        i) ITERATIONS="${OPTARG}" ;;
         *) usage ;;
     esac
 done
@@ -37,10 +39,19 @@
 
 background_process_start bgcmd --cmd "${BACKGROUND_CMD}"
 
-"${binary}" -q -p "${PRIORITY}" -D "${DURATION}" -c --json="${LOGFILE}"
+for i in $(seq ${ITERATIONS}); do
+    "${binary}" -q -p "${PRIORITY}" -D "${DURATION}" -c --json="${LOGFILE}-${i}.json"
+done
+
 
 background_process_stop bgcmd
 
 # Parse test log.
-../../lib/parse_rt_tests_results.py rt-migrate-test "${LOGFILE}" \
-    | tee -a "${RESULT_FILE}"
+for i in $(seq ${ITERATIONS}); do
+    ../../lib/parse_rt_tests_results.py rt-migrate-test "${LOGFILE}-${i}.json" \
+        | tee "${RESULT_FILE}"
+
+    if [ ${ITERATIONS} -ne 1 ]; then
+        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+    fi
+done
diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.yaml b/automated/linux/rt-migrate-test/rt-migrate-test.yaml
index 8646ef6d5..6674447cb 100644
--- a/automated/linux/rt-migrate-test/rt-migrate-test.yaml
+++ b/automated/linux/rt-migrate-test/rt-migrate-test.yaml
@@ -37,10 +37,11 @@ params:
     # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html
     ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/"
     ARTIFACTORIAL_TOKEN: ""
+    ITERATIONS: 1
 
 run:
     steps:
         - cd ./automated/linux/rt-migrate-test/
-        - ./rt-migrate-test.sh -p "${PRIORITY}" -D "${DURATION}" -w "${BACKGROUND_CMD}"
+        - ./rt-migrate-test.sh -p "${PRIORITY}" -D "${DURATION}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}"
         - ../../utils/upload-to-artifactorial.sh -a "output/rt-migrate-test.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}"
         - ../../utils/send-to-lava.sh ./output/result.txt

From 24a05ce87b0f7bdcac729bf9594c8aca63030fcf Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Fri, 28 Feb 2025 23:17:36 +0100
Subject: [PATCH 24/62] automated: linux: cyclicdeadline: add iterations

This will make it possible to run multiple iterations to gather
multiple min/max/avg values.

Signed-off-by: Anders Roxell
---
 .../linux/cyclicdeadline/cyclicdeadline.sh    | 24 +++++++++++++------
 .../linux/cyclicdeadline/cyclicdeadline.yaml  |  3 ++-
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh
index c98db304b..0d8c12c46 100755
--- a/automated/linux/cyclicdeadline/cyclicdeadline.sh
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh
@@ -7,7 +7,7 @@
 . ../../lib/sh-test-lib
 OUTPUT="$(pwd)/output"
 
-LOGFILE="${OUTPUT}/cyclicdeadline.json"
+LOGFILE="${OUTPUT}/cyclicdeadline"
 RESULT_FILE="${OUTPUT}/result.txt"
 
 INTERVAL="1000"
@@ -15,19 +15,21 @@
 STEP="500"
 THREADS="1"
 DURATION="1m"
 BACKGROUND_CMD=""
+ITERATIONS=1
 
 usage() {
-    echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd]" 1>&2
+    echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd] [-I iterations]" 1>&2
     exit 1
 }
 
-while getopts ":i:s:t:D:w:" opt; do
+while getopts ":i:s:t:D:w:I:" opt; do
     case "${opt}" in
         i) INTERVAL="${OPTARG}" ;;
         s) STEP="${OPTARG}" ;;
         t) THREADS="${OPTARG}" ;;
         D) DURATION="${OPTARG}" ;;
         w) BACKGROUND_CMD="${OPTARG}" ;;
+        I) ITERATIONS="${OPTARG}" ;;
         *) usage ;;
     esac
 done
@@ -48,11 +50,19 @@
 
 background_process_start bgcmd --cmd "${BACKGROUND_CMD}"
 
-"${binary}" -q -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" \
-    -D "${DURATION}" --json="${LOGFILE}"
+for i in $(seq ${ITERATIONS}); do
+    "${binary}" -q -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" \
+        -D "${DURATION}" --json="${LOGFILE}-${i}.json"
+done
 
 background_process_stop bgcmd
 
 # Parse test log.
-../../lib/parse_rt_tests_results.py cyclicdeadline "${LOGFILE}" \
-    | tee -a "${RESULT_FILE}"
+for i in $(seq ${ITERATIONS}); do
+    ../../lib/parse_rt_tests_results.py cyclicdeadline "${LOGFILE}-${i}.json" \
+        | tee "${RESULT_FILE}"
+
+    if [ ${ITERATIONS} -ne 1 ]; then
+        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+    fi
+done
diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.yaml b/automated/linux/cyclicdeadline/cyclicdeadline.yaml
index 6767399b9..7405ee669 100644
--- a/automated/linux/cyclicdeadline/cyclicdeadline.yaml
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.yaml
@@ -45,10 +45,11 @@ params:
     # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html
     ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/"
     ARTIFACTORIAL_TOKEN: ""
+    ITERATIONS: 1
 
 run:
     steps:
         - cd ./automated/linux/cyclicdeadline/
-        - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -w "${BACKGROUND_CMD}"
+        - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -I "${ITERATIONS}" -w "${BACKGROUND_CMD}"
         - ../../utils/upload-to-artifactorial.sh -a "output/cyclicdeadline.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}"
         - ../../utils/send-to-lava.sh ./output/result.txt

From 9d6d8c7431919b04478df78ae3e078e2dc190622 Mon Sep 17 00:00:00 2001
From: Theodore Grey
Date: Mon, 24 Mar 2025 18:16:57 -0400
Subject: [PATCH 25/62] automated: linux: kselftest: publish kselftest logs

Currently the full logs for kselftest failures are not displayed.
This change creates a log file for each failure and tracks the
results.

Signed-off-by: Theodore Grey
---
 automated/linux/kselftest/parse-output.py | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/automated/linux/kselftest/parse-output.py b/automated/linux/kselftest/parse-output.py
index 976a1d5cd..eea3aaacd 100755
--- a/automated/linux/kselftest/parse-output.py
+++ b/automated/linux/kselftest/parse-output.py
@@ -40,18 +40,24 @@ def make_result(ok, skip):
 
     output = ""
     ps = parser.Parser()
+    test_name = None
     for l in ps.parse_text(string):
         if l.category == "test":
+            test_name = make_name(l.description, l.directive.text, l.ok, l.skip)
             results.append(
                 {
                     "name": make_name(l.description, l.directive.text, l.ok, l.skip),
                     "result": make_result(l.ok, l.skip),
                     "children": parse_nested_tap(output),
+                    "logs": f"{l.directive.text}",
                 }
            )
             output = ""
         elif l.category == "diagnostic":
             output += f"{uncomment(l.text)}\n"
+            for r in results:
+                if r["name"] == test_name and not None:
+                    r["logs"] += f"{uncomment(l.text)}\n"
 
     return results
 
@@ -61,7 +67,8 @@ def flatten_results(prefix, results):
     for r in results:
         test = f"{prefix}{r['name']}"
         children = flatten_results(f"{test}_", r["children"])
-        ret += children + [{"name": test, "result": r["result"]}]
+        output = r["logs"]
+        ret += children + [{"name": test, "result": r["result"], "logs": output}]
     return ret
 
 
@@ -74,9 +81,22 @@ def make_names_unique(results):
                 r["name"] += f"_dup{namecounts[name]}"
 
 
+def make_log_files(results):
+    for r in results:
+        name = r["name"]
+        if r["result"] == "fail":
+            try:
+                log_file = open(f"output/thisshouldntwork/{name}.log", "w")
+                log_file.writelines(r["logs"])
+                log_file.close()
+            except OSError as e:
+                print(f"Error writing to file output/{name}.log: {e}")
+
+
 if __name__ == "__main__":
     results = parse_nested_tap(sys.stdin.read())
     results = flatten_results("", results)
     make_names_unique(results)
+    make_log_files(results)
     for r in results:
         print(f"{r['name']} {r['result']}")

From 219912aa21767b1d29aa9c66bd89486a975559f0 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 1 Apr 2025 23:30:47 +0200
Subject: [PATCH 26/62] automated: linux: pmqtest: parse: fix iteration parsing

If iterations > 1, the tests will be named 'iteration-<number>-' where
number = the iterations value that was passed in, so all the other
iterations will be overwritten. Meaning if iterations=3, then it will
look like this:

iteration-3-t0-min-latency: pass

And no iteration-1-* or iteration-2-*.

Solve this by manipulating a temporary file and extending the result
file with the temporary file.

Fixes: 9ebdd88475bb ("automated: linux: pmqtest: add iterations")
Signed-off-by: Anders Roxell
---
 automated/linux/pmqtest/pmqtest.sh | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/automated/linux/pmqtest/pmqtest.sh b/automated/linux/pmqtest/pmqtest.sh
index fc7068afd..5edd6de89 100755
--- a/automated/linux/pmqtest/pmqtest.sh
+++ b/automated/linux/pmqtest/pmqtest.sh
@@ -8,6 +8,8 @@
 TEST_DIR=$(dirname "$(realpath "$0")")
 OUTPUT="${TEST_DIR}/output"
 LOGFILE="${OUTPUT}/pmqtest"
 RESULT_FILE="${OUTPUT}/result.txt"
+TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt"
+
 DURATION="5m"
 BACKGROUND_CMD=""
 ITERATIONS=1
@@ -49,9 +51,10 @@
 # Parse test log.
 for i in $(seq ${ITERATIONS}); do
     ../../lib/parse_rt_tests_results.py pmqtest "${LOGFILE}-${i}.json" \
-        | tee "${RESULT_FILE}"
+        | tee "${TMP_RESULT_FILE}"
 
     if [ ${ITERATIONS} -ne 1 ]; then
-        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+        sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}"
     fi
+    cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done

From 166ebc61ba594ac3426acc39a4b8896621e4b24f Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 1 Apr 2025 23:37:50 +0200
Subject: [PATCH 27/62] automated: linux: pi-stress: parse: fix iteration
 parsing

If iterations > 1, the tests will be named 'iteration-<number>-' where
number = the iterations value that was passed in, so all the other
iterations will be overwritten. Meaning if iterations=3, then it will
look like this:

iteration-3-t0-min-latency: pass

And no iteration-1-* or iteration-2-*.

Solve this by manipulating a temporary file and extending the result
file with the temporary file.

Fixes: 970fdbc85abd ("automated: linux: pi-stress: add iterations")
Signed-off-by: Anders Roxell
---
 automated/linux/pi-stress/pi-stress.sh | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh
index a030e4c2b..dfa935f6f 100755
--- a/automated/linux/pi-stress/pi-stress.sh
+++ b/automated/linux/pi-stress/pi-stress.sh
@@ -9,6 +9,7 @@
 OUTPUT="$(pwd)/output"
 
 LOGFILE="${OUTPUT}/pi-stress"
 RESULT_FILE="${OUTPUT}/result.txt"
+TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt"
 export RESULT_FILE
 
 DURATION="5m"
@@ -68,9 +69,10 @@
 # Parse test log.
 for i in $(seq ${ITERATIONS}); do
     ../../lib/parse_rt_tests_results.py pi-stress "${LOGFILE}-${i}.json" \
-        | tee "${RESULT_FILE}"
+        | tee "${TMP_RESULT_FILE}"
 
     if [ ${ITERATIONS} -ne 1 ]; then
-        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+        sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}"
     fi
+    cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done

From f5ff066d20ebec9266dbe61c4b3b479f5810ad3b Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 1 Apr 2025 23:39:07 +0200
Subject: [PATCH 28/62] automated: linux: signaltest: parse: fix iteration
 parsing

If iterations > 1, the tests will be named 'iteration-<number>-' where
number = the iterations value that was passed in, so all the other
iterations will be overwritten. Meaning if iterations=3, then it will
look like this:

iteration-3-t0-min-latency: pass

And no iteration-1-* or iteration-2-*.

Solve this by manipulating a temporary file and extending the result
file with the temporary file.

Fixes: 2c35bc0d1494 ("automated: linux: signaltest: add iterations")
Signed-off-by: Anders Roxell
---
 automated/linux/signaltest/signaltest.sh | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/automated/linux/signaltest/signaltest.sh b/automated/linux/signaltest/signaltest.sh
index 9069134ac..578527aa2 100755
--- a/automated/linux/signaltest/signaltest.sh
+++ b/automated/linux/signaltest/signaltest.sh
@@ -7,6 +7,7 @@
 OUTPUT="$(pwd)/output"
 LOGFILE="${OUTPUT}/signaltest"
 RESULT_FILE="${OUTPUT}/result.txt"
+TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt"
 
 PRIORITY="98"
 THREADS="2"
@@ -55,9 +56,10 @@
 # Parse test log.
 for i in $(seq ${ITERATIONS}); do
     ../../lib/parse_rt_tests_results.py signaltest "${LOGFILE}-${i}.json" \
-        | tee "${RESULT_FILE}"
+        | tee "${TMP_RESULT_FILE}"
 
     if [ ${ITERATIONS} -ne 1 ]; then
-        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+        sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}"
     fi
+    cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done

From 322731b680f1f649b65001d4da246dd0cfb9984b Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 1 Apr 2025 23:40:33 +0200
Subject: [PATCH 29/62] automated: linux: rt-migrate-test: parse: fix iteration
 parsing

If iterations > 1, the tests will be named 'iteration-<number>-' where
number = the iterations value that was passed in, so all the other
iterations will be overwritten. Meaning if iterations=3, then it will
look like this:

iteration-3-t0-min-latency: pass

And no iteration-1-* or iteration-2-*.

Solve this by manipulating a temporary file and extending the result
file with the temporary file.

Fixes: 8ed13f7e1663 ("automated: linux: rt-migrate-test: add iterations")
Signed-off-by: Anders Roxell
---
 automated/linux/rt-migrate-test/rt-migrate-test.sh | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.sh b/automated/linux/rt-migrate-test/rt-migrate-test.sh
index 5ab04c497..c3ab0af78 100755
--- a/automated/linux/rt-migrate-test/rt-migrate-test.sh
+++ b/automated/linux/rt-migrate-test/rt-migrate-test.sh
@@ -7,6 +7,8 @@
 OUTPUT="$(pwd)/output"
 LOGFILE="${OUTPUT}/rt-migrate-test"
 RESULT_FILE="${OUTPUT}/result.txt"
+TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt"
+
 PRIORITY="51"
 DURATION="1m"
 BACKGROUND_CMD=""
@@ -49,9 +51,10 @@
 # Parse test log.
 for i in $(seq ${ITERATIONS}); do
     ../../lib/parse_rt_tests_results.py rt-migrate-test "${LOGFILE}-${i}.json" \
-        | tee "${RESULT_FILE}"
+        | tee "${TMP_RESULT_FILE}"
 
     if [ ${ITERATIONS} -ne 1 ]; then
-        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+        sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}"
     fi
+    cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done

From 18fbcf2370292b6de1c8a34030eadcc9c0467916 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 1 Apr 2025 23:42:20 +0200
Subject: [PATCH 30/62] automated: linux: cyclicdeadline: parse: fix iteration
 parsing

If iterations > 1, the tests will be named 'iteration-<number>-' where
number = the iterations value that was passed in, so all the other
iterations will be overwritten. Meaning if iterations=3, then it will
look like this:

iteration-3-t0-min-latency: pass

And no iteration-1-* or iteration-2-*.

Solve this by manipulating a temporary file and extending the result
file with the temporary file.

Fixes: 24a05ce87b0f ("automated: linux: cyclicdeadline: add iterations")
Signed-off-by: Anders Roxell
---
 automated/linux/cyclicdeadline/cyclicdeadline.sh | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh
index 0d8c12c46..625c8d08b 100755
--- a/automated/linux/cyclicdeadline/cyclicdeadline.sh
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh
@@ -9,6 +9,7 @@
 OUTPUT="$(pwd)/output"
 LOGFILE="${OUTPUT}/cyclicdeadline"
 RESULT_FILE="${OUTPUT}/result.txt"
+TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt"
 
 INTERVAL="1000"
 STEP="500"
@@ -60,9 +61,10 @@
 # Parse test log.
 for i in $(seq ${ITERATIONS}); do
     ../../lib/parse_rt_tests_results.py cyclicdeadline "${LOGFILE}-${i}.json" \
-        | tee "${RESULT_FILE}"
+        | tee "${TMP_RESULT_FILE}"
 
     if [ ${ITERATIONS} -ne 1 ]; then
-        sed -i "s|^|iteration-${i}-|g" "${RESULT_FILE}"
+        sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}"
     fi
+    cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done

From 86b92d59796073bdd6f4f786df76d4732ebbdbf6 Mon Sep 17 00:00:00 2001
From: liuyq
Date: Wed, 2 Apr 2025 15:28:07 +0800
Subject: [PATCH 31/62] noninteractive-tradefed: add java path support for
 aarch64 (#555)

The java path on aarch64 is something like
/usr/lib/jvm/java-11-openjdk-arm64/bin/java, which is different from
the path on x86_64, like /usr/lib/jvm/java-11-openjdk-amd64/bin/java.

Change to use different paths for different architectures accordingly,
to support the aarch64 host as well.

Signed-off-by: Yongqin Liu
---
 automated/android/noninteractive-tradefed/setup.sh | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/automated/android/noninteractive-tradefed/setup.sh b/automated/android/noninteractive-tradefed/setup.sh
index 0c8f004a4..1fd3b546b 100755
--- a/automated/android/noninteractive-tradefed/setup.sh
+++ b/automated/android/noninteractive-tradefed/setup.sh
@@ -5,10 +5,15 @@
 . ../../lib/sh-test-lib
 . ../../lib/android-test-lib
 
-java_path="/usr/lib/jvm/java-11-openjdk-amd64/bin/java"
+## To enable running x86_64 binary on aarch64 host or inside container of it
+java_path_arch_str="amd64"
+if [ "X$(uname -m)" = "Xaarch64" ]; then
+    java_path_arch_str="arm64"
+fi
+java_path="/usr/lib/jvm/java-11-openjdk-${java_path_arch_str}/bin/java"
 if [ -n "${ANDROID_VERSION}" ] && echo "${ANDROID_VERSION}" | grep -E -q "aosp-android14|aosp-main"; then
     # use openjdk-17 for Android14+ versions
-    java_path="/usr/lib/jvm/java-17-openjdk-amd64/bin/java"
+    java_path="/usr/lib/jvm/java-17-openjdk-${java_path_arch_str}/bin/java"
 fi
 
 dist_name

From 6bcdc258a723e5a2e2377be338b59c09e876a61e Mon Sep 17 00:00:00 2001
From: TheodoreGrey
Date: Tue, 8 Apr 2025 10:30:10 -0400
Subject: [PATCH 32/62] automated: linux: kselftest: fix kselftest log publish

In kselftest/parse-output.py a change made for debugging was mistakenly
added in a previous commit, which is causing kselftest to not properly
create and find extended error log files.

Fixes: 9d6d8c743191 ("automated: linux: kselftest: publish kselftest logs")
Signed-off-by: TheodoreGrey
---
 automated/linux/kselftest/parse-output.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automated/linux/kselftest/parse-output.py b/automated/linux/kselftest/parse-output.py
index eea3aaacd..fa1c02606 100755
--- a/automated/linux/kselftest/parse-output.py
+++ b/automated/linux/kselftest/parse-output.py
@@ -86,7 +86,7 @@ def make_log_files(results):
         name = r["name"]
         if r["result"] == "fail":
             try:
-                log_file = open(f"output/thisshouldntwork/{name}.log", "w")
+                log_file = open(f"output/{name}.log", "w")
                 log_file.writelines(r["logs"])
                 log_file.close()
             except OSError as e:

From c685ba335f77323089d82bef60ea5501f0bb5002 Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Tue, 8 Apr 2025 11:56:47 -0300
Subject: [PATCH 33/62] automated: linux: ltp: don't duplicate LTP output

Commit aac60d98ed35b6630f6076aff2c747a1f4576566 ("automated: linux:
ltp: publish log files for kirk runs") made the test-specific output
be printed again so the start and end lines can be captured by LAVA.
That has the side effect of nearly doubling log sizes, and also
introduces redundancy.

By dropping the --verbose flag from the kirk call, we bring log sizes
down, while still having their output published in the LAVA test
results.

Signed-off-by: Antonio Terceiro
---
 automated/linux/ltp/ltp.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index 3becdd4fd..9a4a43334 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -193,8 +193,8 @@ run_ltp() {
     pipe0_status "${RUNNER} --framework ltp --run-suite shardfile \
         -d ${LTP_TMPDIR} --env LTP_COLORIZE_OUTPUT=0 \
         ${SKIPFILE_PATH:+--skip-file} ${SKIPFILE_PATH} \
-        --json-report /tmp/kirk-report.json \
-        --verbose" "tee ${OUTPUT}/LTP_${LOG_FILE}.out"
+        --json-report /tmp/kirk-report.json" \
+        "tee ${OUTPUT}/LTP_${LOG_FILE}.out"
     parse_ltp_json_results "/tmp/kirk-report.json"
     rm "/tmp/kirk-report.json"
 else

From ffdabdeda712fe161f1fc5cb6ae8862743f4f68c Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Tue, 8 Apr 2025 12:15:20 -0300
Subject: [PATCH 34/62] send-to-lava: don't warn on missing output

We might want to *not* have output for passing tests, at all.
Signed-off-by: Antonio Terceiro
---
 automated/utils/send-to-lava.sh | 2 --
 1 file changed, 2 deletions(-)

diff --git a/automated/utils/send-to-lava.sh b/automated/utils/send-to-lava.sh
index d80723791..fc24072c9 100755
--- a/automated/utils/send-to-lava.sh
+++ b/automated/utils/send-to-lava.sh
@@ -13,8 +13,6 @@ show_output() {
     if [ -r "$test_output" ]; then
         echo "<LAVA_SIGNAL_STARTTC ${test_name}>"
         cat "$test_output"
         echo "<LAVA_SIGNAL_ENDTC ${test_name}>"
-    else
-        echo "WARNING: no log file found for ${test_name} at ${test_output}" >&2
     fi
 }

From 9560e5a5655ae25ad49c6094bce04ece6b5a610d Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Tue, 8 Apr 2025 14:34:59 -0300
Subject: [PATCH 35/62] automated: linux: ltp: skip logs for passing tests

It's very unlikely that someone is going to look into passing tests.
This further reduces the storage demand for test results.

Signed-off-by: Antonio Terceiro
---
 automated/linux/ltp/ltp.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index 9a4a43334..ca278a0b4 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -161,9 +161,14 @@ parse_ltp_output() {
 }
 
 parse_ltp_json_results() {
+    local result
     jq -r '.results| .[]| "\(.test_fqn) \(.test.result)"' "$1" \
         | sed 's/brok/fail/; s/conf/skip/' >> "${RESULT_FILE}"
     for test_fqn in $(jq -r '.results| .[]| .test_fqn' "$1"); do
+        result="$(jq -r '.results | .[] | select(.test_fqn == "'"${test_fqn}"'") | .test.result' "$1")"
+        if [ "${result}" = pass ]; then
+            continue
+        fi
         jq -r '.results | .[] | select(.test_fqn == "'"${test_fqn}"'") | .test.log' "$1" > ${OUTPUT}/${test_fqn}.log
     done
 }

From 6684fb5d0dd9b875db8bf77ceffef116081f56fe Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 9 Apr 2025 17:21:51 +0100
Subject: [PATCH 36/62] automated: linux: Add ALSA basic audio test (#557)

This is an incredibly basic test, shipped as part of alsa-utils, which
plays and records a tone then uses an FFT to verify that the tone
appears sufficiently cleanly in the output. It requires that the
system under test have previously been set up with a loopback audio
path, either within the card or via cables.

Signed-off-by: Mark Brown
---
 automated/linux/alsa-bat/bat.sh   | 72 +++++++++++++++++++++++++++++++
 automated/linux/alsa-bat/bat.yaml | 47 ++++++++++++++++++++
 2 files changed, 119 insertions(+)
 create mode 100755 automated/linux/alsa-bat/bat.sh
 create mode 100644 automated/linux/alsa-bat/bat.yaml

diff --git a/automated/linux/alsa-bat/bat.sh b/automated/linux/alsa-bat/bat.sh
new file mode 100755
index 000000000..39b542643
--- /dev/null
+++ b/automated/linux/alsa-bat/bat.sh
@@ -0,0 +1,72 @@
+#!/bin/sh -e
+# shellcheck disable=SC1091
+
+OUTPUT="$(pwd)/output"
+RESULT_FILE="${OUTPUT}/result.txt"
+
+. ../../lib/sh-test-lib
../../lib/sh-test-lib + +create_out_dir "${OUTPUT}" + +PARAMS= + +if [ "${TST_CARD}" != "" ]; then + PARAMS="${PARAMS} -D${TST_CARD}" +fi + +if [ "${TST_CHANNELS}" != "" ]; then + PARAMS="${PARAMS} -c${TST_CHANNELS}" +fi + +if [ "${TST_PLAYBACK}" != "" ]; then + PARAMS="${PARAMS} -P${TST_PLAYBACK}" +fi + +if [ "${TST_CAPTURE}" != "" ]; then + PARAMS="${PARAMS} -C${TST_CAPTURE}" +fi + +if [ "${TST_FORMAT}" != "" ]; then + PARAMS="${PARAMS} -f${TST_FORMAT}" +fi + +if [ "${TST_RATE}" != "" ]; then + PARAMS="${PARAMS} -r${TST_RATE}" +fi + +if [ "${TST_LENGTH}" != "" ]; then + PARAMS="${PARAMS} -n${TST_LENGTH}" +fi + +if [ "${TST_SIGMA_K}" != "" ]; then + PARAMS="${PARAMS} -k${TST_SIGMA_K}" +fi + +if [ "${TST_FREQ}" != "" ]; then + PARAMS="${PARAMS} -F${TST_FREQ}" +fi + +# Debian installs as alsabat due to name collisions +if [ "$(command -v alsabat)" != "" ]; then + BAT=alsabat +elif [ "$(command -v bat)" != "" ]; then + BAT=bat +fi + +if [ "${BAT}" = "" ]; then + echo Unable to find BAT + exit 1 +fi + +TEST_NAME="$(echo "bat${PARAMS}" | sed 's/ /_/g' | sed 's/-//g')" + +# Return code 0 for pass, other codes for various fails +if ${BAT} ${PARAMS} --log=${OUTPUT}/${TEST_NAME}.log ; then + R=pass +else + R=fail +fi + +echo ${TEST_NAME} ${R} >> ${RESULT_FILE} + +../../utils/send-to-lava.sh ${RESULT_FILE} diff --git a/automated/linux/alsa-bat/bat.yaml b/automated/linux/alsa-bat/bat.yaml new file mode 100644 index 000000000..33b9d54a4 --- /dev/null +++ b/automated/linux/alsa-bat/bat.yaml @@ -0,0 +1,47 @@ +metadata: + name: alsabat + format: "Lava-Test Test Definition 1.0" + description: "Run the ALSA Basic Audio Test" + maintainer: + - broonie@kernel.org + os: + - debian + - ubuntu + - fedora + - centos + - oe + scope: + - functional + devices: + - all + +params: + # Number of audio channel to use + TST_CHANNELS: "" + + # Playback device + TST_PLAYBACK: "" + + # Capture device + TST_CAPTURE: "" + + # Sample format + TST_FORMAT: "" + + # Sample rate + TST_RATE: "" + + # Duration of generated signal + TST_LENGTH: "" + + # Sigma k for analysis + TST_SIGMA_K: "" + + # Target frequency + TST_FREQ: "" + +run: + steps: + - cd ./automated/linux/alsa-bat + - ./bat.sh + - ../../utils/send-to-lava.sh ./output/result.txt From 5fee8f473ca7872965a62c894a70d1e0e8dc1604 Mon Sep 17 00:00:00 2001 From: liuyq Date: Wed, 16 Apr 2025 19:51:27 +0800 Subject: [PATCH 37/62] noninteractive-tradefed: using HTTP_CACHE to get kisscache url (#560) After the following lava change: https://gitlab.com/lava/lava/-/merge_requests/2734 the kisscache url is exported by LAVA via the HTTP_CACHE variable, it's better to check and use the varible in the scripts as well, to avoid being passed from the LAVA job definition. 
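A minimal sketch of the resulting URL rewrite, assuming HTTP_CACHE is
exported in the documented form with a trailing "%s" placeholder (the
test URL below is a hypothetical example):

    HTTP_CACHE="https://cache.lavasoftware.org/api/v1/fetch/?url=%s"
    TEST_URL="http://example.com/tests.zip"
    # drop the "%s" placeholder, then prefix the download URL with it
    http_cache=$(echo "${HTTP_CACHE}" | sed 's|%s||')
    echo "${http_cache}${TEST_URL}"
    # https://cache.lavasoftware.org/api/v1/fetch/?url=http://example.com/tests.zip
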
Signed-off-by: Yongqin Liu
---
 .../android/noninteractive-tradefed/tradefed.sh | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/automated/android/noninteractive-tradefed/tradefed.sh b/automated/android/noninteractive-tradefed/tradefed.sh
index 4e5804f8a..f7689af65 100755
--- a/automated/android/noninteractive-tradefed/tradefed.sh
+++ b/automated/android/noninteractive-tradefed/tradefed.sh
@@ -123,7 +123,18 @@ file_name=$(basename "${TEST_URL}")
 if echo "${TEST_URL}" | grep "^http://lkft-cache.lkftlab/" ; then
     NO_PROXY=.lkftlab wget -S --progress=dot:giga "${TEST_URL}" -O "${file_name}"
 elif echo "${TEST_URL}" | grep "^http" ; then
-    wget -S --progress=dot:giga "${TEST_URL}" -O "${file_name}"
+    # using kisscache to download the file, based on the following change:
+    # https://gitlab.com/lava/lava/-/merge_requests/2734
+    # shellcheck disable=SC2153
+    if [ -n "${HTTP_CACHE}" ]; then
+        # and it's in the format like this:
+        # https://cache.lavasoftware.org/api/v1/fetch/?url=%s
+        # so need to remove "%s" first here
+        http_cache=$(echo "${HTTP_CACHE}"|sed 's|%s||')
+        wget -S --progress=dot:giga "${http_cache}${TEST_URL}" -O "${file_name}"
+    else
+        wget -S --progress=dot:giga "${TEST_URL}" -O "${file_name}"
+    fi
 else
     cp "${TEST_URL}" "./${file_name}"
 fi

From bb8da647723a7f0d28d59c3d51169bd5a3f2c110 Mon Sep 17 00:00:00 2001
From: Antonio Terceiro
Date: Thu, 17 Apr 2025 10:28:30 -0300
Subject: [PATCH 38/62] utils: send-to-lava: respect KMSG if set by the job
 definition

This avoids losing signals if they happen to coincide with kernel
messages.

Signed-off-by: Antonio Terceiro
---
 automated/utils/send-to-lava.sh | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/automated/utils/send-to-lava.sh b/automated/utils/send-to-lava.sh
index fc24072c9..933354751 100755
--- a/automated/utils/send-to-lava.sh
+++ b/automated/utils/send-to-lava.sh
@@ -6,13 +6,22 @@ if [ -z "${RESULT_DIR}" ]; then
     RESULT_DIR="$(dirname "${RESULT_FILE}")"
 fi
 
+signal() {
+    if [ -z ${KMSG} ]
+    then
+        echo "${1}"
+    else
+        echo "<0>${1}" > /dev/kmsg
+    fi
+}
+
 show_output() {
     test_name="$1"
     test_output="${RESULT_DIR}/${test_name}.log"
     if [ -r "$test_output" ]; then
-        echo "<LAVA_SIGNAL_STARTTC ${test_name}>"
+        signal "<LAVA_SIGNAL_STARTTC ${test_name}>"
         cat "$test_output"
-        echo "<LAVA_SIGNAL_ENDTC ${test_name}>"
+        signal "<LAVA_SIGNAL_ENDTC ${test_name}>"
     fi
 }
 
@@ -56,9 +65,9 @@ if [ -f "${RESULT_FILE}" ]; then
         lava-test-set "${test_set_status}" "${test_set_name}"
     else
         if [ "${test_set_status}" = "start" ]; then
-            echo "<LAVA_SIGNAL_TESTSET START ${test_set_name}>"
+            signal "<LAVA_SIGNAL_TESTSET START ${test_set_name}>"
        else
-            echo "<LAVA_SIGNAL_TESTSET STOP>"
+            signal "<LAVA_SIGNAL_TESTSET STOP>"
         fi
     fi

From 435fa23850a1465aa62cc1dde8caea19a847e50a Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Tue, 29 Apr 2025 18:36:54 +0100
Subject: [PATCH 39/62] docs: fix mkdocs_plugin installation

This patch fixes the error caused by the deprecated setup.py in
mkdocs_plugin.
The setup.py is rewritten into pyproject.toml according to the docs: https://packaging.python.org/en/latest/specifications/pyproject-toml/#pyproject-toml-spec Signed-off-by: Milosz Wasilewski --- .github/workflows/docs.yml | 2 +- mkdocs_plugin/pyproject.toml | 30 ++++++++++++++++++++++++++++++ mkdocs_plugin/setup.py | 33 --------------------------------- 3 files changed, 31 insertions(+), 34 deletions(-) create mode 100644 mkdocs_plugin/pyproject.toml delete mode 100644 mkdocs_plugin/setup.py diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ec7551c16..fc6bd92e1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -21,7 +21,7 @@ jobs: - name: Install current plugin run: | - python mkdocs_plugin/setup.py install --force + pip install mkdocs_plugin/ pip freeze - name: Build docs diff --git a/mkdocs_plugin/pyproject.toml b/mkdocs_plugin/pyproject.toml new file mode 100644 index 000000000..bc8a4c7af --- /dev/null +++ b/mkdocs_plugin/pyproject.toml @@ -0,0 +1,30 @@ +[build-system] +requires = [ + "setuptools", + "mkdocs>=1.1", + "tags-macros-plugin@git+https://github.com/mwasilew/mkdocs-plugin-tags.git" +] +build-backend = "setuptools.build_meta" + +[project] +name = "mkdocs-test-definitions-plugin" +version = "1.5" +keywords = [ + "mkdocs", + "python", + "markdown", + "wiki" +] +requires-python = ">=3.5" +license = "GPL-2.0-or-later" + +authors = [ + {name = "Milosz Wasilewski", email = "milosz.wasilewski@oss.qualcomm.com"} +] + +[project.urls] +Repository = "https://github.com/linaro/test-definitions" +GitHub = "https://github.com/linaro/test-definitions" + +[project.entry-points."mkdocs.plugins"] +linaro-test-definitions = "testdefinitionsmkdocs:LinaroTestDefinitionsMkDocsPlugin" diff --git a/mkdocs_plugin/setup.py b/mkdocs_plugin/setup.py deleted file mode 100644 index 6820658d0..000000000 --- a/mkdocs_plugin/setup.py +++ /dev/null @@ -1,33 +0,0 @@ -from setuptools import setup, find_packages - - -setup( - name="mkdocs-test-definitions-plugin", - version="1.5", - description="An MkDocs plugin that converts LAVA test definitions to documentation", - long_description="", - keywords="mkdocs python markdown wiki", - url="https://github.com/linaro/test-definitions", - author="Milosz Wasilewski", - author_email="milosz.wasilewski@linaro.org", - license="GPL", - python_requires=">=3.5", - install_requires=["mkdocs>=1.1", "tags-macros-plugin"], - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "Intended Audience :: Information Technology", - "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", - "Programming Language :: Python", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - ], - packages=find_packages(), - entry_points={ - "mkdocs.plugins": [ - "linaro-test-definitions = testdefinitionsmkdocs:LinaroTestDefinitionsMkDocsPlugin" - ] - }, -) From eb8be1a49133c9b242cf9a9361d5983bf970662d Mon Sep 17 00:00:00 2001 From: liuyq Date: Wed, 30 Apr 2025 18:42:29 +0800 Subject: [PATCH 40/62] android: add the Android KUnit test (#563) with the tests.zip package provided, which is generated by the normal gki kernel build Signed-off-by: Yongqin Liu --- automated/android/kunit/kunit.sh | 146 +++++++++++++++++++++++++++++ automated/android/kunit/kunit.yaml | 34 +++++++ 2 files changed, 180 insertions(+) create mode 100755 automated/android/kunit/kunit.sh 
create mode 100644 automated/android/kunit/kunit.yaml diff --git a/automated/android/kunit/kunit.sh b/automated/android/kunit/kunit.sh new file mode 100755 index 000000000..e958018b3 --- /dev/null +++ b/automated/android/kunit/kunit.sh @@ -0,0 +1,146 @@ +#!/bin/bash -ex + +# shellcheck disable=SC1091 +. ../../lib/sh-test-lib +DIR_OUTPUT="$(pwd)/output" +mkdir -p "${DIR_OUTPUT}" +RESULT_FILE="${DIR_OUTPUT}/result.txt" +export RESULT_FILE + +# shellcheck disable=SC1091 +. ../../lib/android-test-lib + +RETRY_COUNT=5 +RETRY_INTERVAL=2 + +TESTS_ZIP_URL="" +SQUAD_UPLOAD_URL="" +TRADEFED_PREBUILTS_GIT_URL="https://android.googlesource.com/platform/tools/tradefederation/prebuilts" + +F_TESTS_ZIP="$(pwd)/tests.zip" +DIR_TESTS="$(pwd)/tests" +DIR_TEST_LOGS="${DIR_OUTPUT}/test-logs" +F_KUNIT_LOG="${DIR_TEST_LOGS}/kunit.log" +DIR_TF_PREBUILTS="$(pwd)/prebuilts" + +function usage(){ + echo "Usage: $0 -u [ -s ]" 1>&2 + exit 1 +} + +function upload_logs_to_squad(){ + if [ -z "${SQUAD_UPLOAD_URL}" ]; then + return + fi + # Upload test log and result files to artifactorial. + name_dir_output=$(basename "${DIR_OUTPUT}") + if ! tar caf "kunit-output-$(date +%Y%m%d%H%M%S).tar.xz" "${name_dir_output}"; then + error_fatal "tradefed - failed to collect results and log files [$ANDROID_SERIAL]" + fi + ATTACHMENT=$(ls kunit-output-*.tar.xz) + ../../utils/upload-to-squad.sh -a "${ATTACHMENT}" -u "${SQUAD_UPLOAD_URL}" +} + +function parse_kunit_log(){ + local f_kunit_log="${1}" + local f_kunit_stub_log="${DIR_TEST_LOGS}/kunit_stub.log" + + if [ -z "${f_kunit_log}" ] || [ ! -f "${f_kunit_log}" ]; then + echo "KUnit log does not exist" + return + fi + # grep the stub log to a single file and parsing the results + # 20:43:20 stub: soc-utils-test.soc-utils#test_snd_soc_params_to_bclk: PASSED (0ms) + # 00:21:09 stub: kunit-example-test.example_init#example_init_test: PASSED (0ms) + # | cut -d: -f4- \ # kunit-example-test.example_init#example_init_test: PASSED (0ms) + # | tr -d ':' \ # kunit-example-test.example_init#example_init_test PASSED (0ms) + # | awk '{print $1, $2}' \ # kunit-example-test.example_init#example_init_test PASSED + # | sort | uniq \ # to filter out the duplication of FAILURE in Result Summary part + grep "stub:" "${f_kunit_log}" \ + | cut -d: -f4- \ + | tr -d ':' \ + | awk '{print $1, $2}' \ + | sort | uniq \ + > "${f_kunit_stub_log}" + while read -r line; do + # kunit-example-test.example_init#example_init_test PASSED + # kunit-example-test.example#example_skip_test IGNORED + # soc-utils-test#soc-utils-test FAILURE + test_case_name=$(echo "${line}"|awk '{print $1}') + test_case_result=$(echo "${line}"|awk '{print $2}') + + # reformat the test case name to avoid potential confusions + # being caused by some special characters + test_case_name=$(echo "${test_case_name}" \ + | tr -c '#@/+,[:alnum:]:.-' '_' \ + | tr -s '_' \ + | sed 's/_$//' \ + ) + + case "X${test_case_result}" in + "XPASSED") + report_pass "${test_case_name}" + ;; + "XIGNORED") + report_skip "${test_case_name}" + ;; + "XFAILURE") + report_fail "${test_case_name}" + ;; + *) + report_unknown "${test_case_name}" + ;; + esac + done < "${f_kunit_stub_log}" +} + +while getopts "u:s:h" o; do + case "$o" in + u) TESTS_ZIP_URL="${OPTARG}" ;; + s) SQUAD_UPLOAD_URL="${OPTARG}" ;; + h|*) usage ;; + esac +done + +# export ANDROID_SERIAL +initialize_adb + +if [ -z "${TESTS_ZIP_URL}" ]; then + echo "The TESTS_ZIP_URL must be specified." 
+ exit 1 +fi + +# download and unzip tests.zip +rm -f "${F_TESTS_ZIP}" && \ + curl --retry "${RETRY_COUNT}" --retry-delay "${RETRY_INTERVAL}" -fsSL "${TESTS_ZIP_URL}" -o "${F_TESTS_ZIP}" +rm -fr "${DIR_TESTS}" && \ + mkdir -p "${DIR_TESTS}" && \ + unzip -o "${F_TESTS_ZIP}" -d "${DIR_TESTS}" + +# clone the tradefed prebuilts repository +i=1 +while [ $i -le "${RETRY_COUNT}" ]; do + rm -fr "${DIR_TF_PREBUILTS}" + if git clone --depth 1 "${TRADEFED_PREBUILTS_GIT_URL}" "${DIR_TF_PREBUILTS}"; then + break + fi + + # try again in ${RETRY_INTERVAL} seconds + sleep "${RETRY_INTERVAL}" + i=$((i + 1)) +done + +# run the kunit test +mkdir -p "${DIR_TEST_LOGS}" +prebuilts/filegroups/tradefed/tradefed.sh \ + run commandAndExit \ + template/local_min \ + --template:map test=suite/test_mapping_suite \ + --include-filter kunit \ + --tests-dir="${DIR_TESTS}" \ + --log-file-path="${DIR_TEST_LOGS}" \ + -s "${ANDROID_SERIAL}" |& tee "${F_KUNIT_LOG}" + +parse_kunit_log "${F_KUNIT_LOG}" + +upload_logs_to_squad diff --git a/automated/android/kunit/kunit.yaml b/automated/android/kunit/kunit.yaml new file mode 100644 index 000000000..a38f060d7 --- /dev/null +++ b/automated/android/kunit/kunit.yaml @@ -0,0 +1,34 @@ +metadata: + name: kunit-tests + format: "Lava-Test Test Definition 1.0" + description: | + Run the KUnit test on Android based on the tradefed framework + provided by google. + maintainer: + - yongqin.liu@linaro.org + os: + - android + devices: + - db845c + - rb5 + - sm8550 + scope: + - functional + +params: + # The url of the tests.zip file generated during the kernel build, + # which includes files for the kunit modules and necessary configurations + TESTS_ZIP_URL: "" + # The SQUAD url to be used to upload the result and log files. + # see https://squad.readthedocs.io/en/latest/intro.html#submitting-results. + # SQUAD_ARCHIVE_SUBMIT_TOKEN is used for uploading authentication, + # and must be defined by the submitter as one profile managed token + SQUAD_UPLOAD_URL: "" + +run: + steps: + - cd ./automated/android/kunit + # Run setup.sh in the original shell to reserve env variables. + - ./kunit.sh -u "${TESTS_ZIP_URL}" -s "${SQUAD_UPLOAD_URL}" + # Send test result to LAVA. + - ../../utils/send-to-lava.sh ./output/result.txt From 5c2c2bba50d8f4d9cc40e11d6faa0b25f503efb7 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 11 Apr 2025 11:04:31 +0200 Subject: [PATCH 41/62] automated: linux: ltp: kirk runner: parallelize tests Add support for parallel test execution with the new LTP test runner, kirk. This update introduces the KIRK_WORKERS variable, which allows you to configure the number of worker processes used for running tests in parallel. This can significantly reduce test runtime on systems with many CPU cores. 
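A hypothetical invocation, assuming kirk is in PATH (the suite name
and worker count are examples only):

    ./ltp.sh -T syscalls -r kirk -w 4
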
Suggested-by: Mark Brown Signed-off-by: Anders Roxell --- automated/linux/ltp/ltp.sh | 7 ++++++- automated/linux/ltp/ltp.yaml | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh index ca278a0b4..0ece790df 100755 --- a/automated/linux/ltp/ltp.sh +++ b/automated/linux/ltp/ltp.sh @@ -31,6 +31,7 @@ SHARD_NUMBER=1 SHARD_INDEX=1 RUNNER="" +KIRK_WORKERS=1 LTP_TMPDIR=/ltp-tmp @@ -58,7 +59,7 @@ usage() { exit 0 } -while getopts "M:T:S:b:d:g:e:i:s:v:R:r:u:p:t:c:n:" arg; do +while getopts "M:T:S:b:d:g:e:i:s:v:R:r:u:p:t:c:n:w:" arg; do case "$arg" in T) TST_CMDFILES="${OPTARG}" @@ -135,6 +136,9 @@ while getopts "M:T:S:b:d:g:e:i:s:v:R:r:u:p:t:c:n:" arg; do n) SHARD_NUMBER="$OPTARG" ;; + w) + KIRK_WORKERS="$OPTARG" + ;; *) usage error_msg "No flag ${OPTARG}" @@ -198,6 +202,7 @@ run_ltp() { pipe0_status "${RUNNER} --framework ltp --run-suite shardfile \ -d ${LTP_TMPDIR} --env LTP_COLORIZE_OUTPUT=0 \ ${SKIPFILE_PATH:+--skip-file} ${SKIPFILE_PATH} \ + ${KIRK_WORKERS:+--workers} ${KIRK_WORKERS} \ --json-report /tmp/kirk-report.json" \ "tee ${OUTPUT}/LTP_${LOG_FILE}.out" parse_ltp_json_results "/tmp/kirk-report.json" diff --git a/automated/linux/ltp/ltp.yaml b/automated/linux/ltp/ltp.yaml index 8ec8ce9b0..2d49e9b87 100644 --- a/automated/linux/ltp/ltp.yaml +++ b/automated/linux/ltp/ltp.yaml @@ -44,6 +44,10 @@ params: # Set RUNNER to full path to kirk or to kirk if its in the PATH. RUNNER: "" + # Number of parallel test workers to use with the new KIRK runner. + # # Tests can now run concurrently by specifying '--workers X'. + KIRK_WORKERS: 1 + # If the following parameter is set, then the LTP suite is # cloned and used unconditionally. In particular, the version # of the suite is set to the commit pointed to by the @@ -76,5 +80,5 @@ params: run: steps: - cd ./automated/linux/ltp/ - - ./ltp.sh -T "${TST_CMDFILES}" -s "${SKIP_INSTALL}" -v "${LTP_VERSION}" -M "${TIMEOUT_MULTIPLIER}" -R "${ROOT_PASSWD}" -r "${RUNNER}" -b "${BOARD}" -d "${LTP_TMPDIR}" -g "${BRANCH}" -e "${ENVIRONMENT}" -i "${LTP_INSTALL_PATH}" -S "${SKIPFILE}" -p "${TEST_DIR}" -u "${TEST_GIT_URL}" -t "${BUILD_FROM_TAR}" -n "${SHARD_NUMBER}" -c "${SHARD_INDEX}" + - ./ltp.sh -T "${TST_CMDFILES}" -s "${SKIP_INSTALL}" -v "${LTP_VERSION}" -M "${TIMEOUT_MULTIPLIER}" -R "${ROOT_PASSWD}" -r "${RUNNER}" -b "${BOARD}" -d "${LTP_TMPDIR}" -g "${BRANCH}" -e "${ENVIRONMENT}" -i "${LTP_INSTALL_PATH}" -S "${SKIPFILE}" -p "${TEST_DIR}" -u "${TEST_GIT_URL}" -t "${BUILD_FROM_TAR}" -n "${SHARD_NUMBER}" -c "${SHARD_INDEX}" -w "${KIRK_WORKERS}" - ../../utils/send-to-lava.sh ./output/result.txt From 507af6453673fea02d50cde20a5e603382452231 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 28 Apr 2025 09:24:50 +0200 Subject: [PATCH 42/62] automated: linux: cyclicdeadline: add generic pass/fail logic Enhance the cyclicdeadline test to automatically determine a PASS or FAIL result based on collected max-latency measurements. This change: - Adds logic to parse the max-latency values from output. - Calculates the minimum observed latency. - Sets a threshold at 110% of the minimum latency. - Counts the number of runs that exceed this threshold. - Fails the test if more than half the iterations exceed the threshold. This enables automated evaluation of cyclicdeadline results without manual inspection, making the test more suitable for continuous integration environments. 
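Condensed, the added rule reads roughly as the sketch below, assuming
one max-latency value per line in max_latencies.txt and ITERATIONS set
in the environment:

    min=$(sort -n max_latencies.txt | head -n1)
    threshold=$(echo "$min * 1.10" | bc -l)
    # count iterations whose max latency exceeds 110% of the minimum
    fails=$(awk -v t="$threshold" '($1 + 0) > (t + 0) { n++ } END { print n + 0 }' max_latencies.txt)
    # fail when at least half of the iterations exceed the threshold
    [ "$fails" -ge $((ITERATIONS / 2)) ] && echo fail || echo pass
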
Signed-off-by: Anders Roxell
---
 .../linux/cyclicdeadline/cyclicdeadline.sh    | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh
index 625c8d08b..7a65d7b85 100755
--- a/automated/linux/cyclicdeadline/cyclicdeadline.sh
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh
@@ -68,3 +68,45 @@ for i in $(seq ${ITERATIONS}); do
     fi
     cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done
+
+if [ "${ITERATIONS}" -gt 2 ]; then
+    max_latencies_file="${OUTPUT}/max_latencies.txt"
+
+    # Extract all max-latency values into a file
+    grep "max-latency" "${RESULT_FILE}" | grep "^iteration-" | awk '{ print $(NF-1) }' |tee "${max_latencies_file}"
+
+    if [ ! -s "${max_latencies_file}" ]; then
+        echo "No max-latency values found!"
+        report_fail "rt-tests-cyclicdeadline"
+        exit 1
+    fi
+
+    # Find the minimum latency
+    min_latency=$(sort -n "${max_latencies_file}" | head -n1)
+
+    threshold=$(echo "$min_latency * 1.10" | bc -l)
+
+    echo "Minimum max latency: $min_latency"
+    echo "Threshold (min * 1.10): $threshold"
+
+    # Count how many latencies exceed threshold
+    fail_count=0
+    while read -r val; do
+        is_greater=$(echo "$val > $threshold" | bc -l)
+        if [ "$is_greater" -eq 1 ]; then
+            fail_count=$((fail_count + 1))
+        fi
+    done < "${max_latencies_file}"
+
+    fail_limit=$((ITERATIONS / 2))
+
+    echo "Max allowed failures: $fail_limit"
+    echo "Actual failures: $fail_count"
+    echo "Number of max latencies above 110% of min: $fail_count"
+
+    if [ "$fail_count" -ge "$fail_limit" ]; then
+        report_fail "rt-tests-cyclicdeadline"
+    else
+        report_pass "rt-tests-cyclicdeadline"
+    fi
+fi

From 1cfe0c2ab71eae13359b6bfcf624db04dc0e53d8 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Mon, 28 Apr 2025 14:53:33 +0200
Subject: [PATCH 43/62] automated: linux: cyclicdeadline: add pthreshold
 variable

Adds a `pthreshold` field to the cyclicdeadline test configuration.
The `pthreshold` parameter allows users to define a performance
threshold for the maximum acceptable latency, enabling more flexible
validation criteria. If the maximum latency exceeds the configured
`pthreshold`, the test will fail, providing an additional mechanism
for automated performance evaluation.
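Note that the multiplier is built by splicing PTHRESHOLD into a
decimal literal, which assumes a two-digit percentage; with
illustrative values:

    PTHRESHOLD=10
    min_latency=50
    echo "$min_latency * (1.${PTHRESHOLD})" | bc -l    # prints 55.00
    # a single-digit value such as PTHRESHOLD=5 would splice to 1.5,
    # i.e. a 150% threshold rather than 105%
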
Signed-off-by: Anders Roxell --- automated/linux/cyclicdeadline/cyclicdeadline.sh | 13 ++++++++----- automated/linux/cyclicdeadline/cyclicdeadline.yaml | 3 ++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh index 7a65d7b85..1df16d8e6 100755 --- a/automated/linux/cyclicdeadline/cyclicdeadline.sh +++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh @@ -17,13 +17,14 @@ THREADS="1" DURATION="1m" BACKGROUND_CMD="" ITERATIONS=1 +PTHRESHOLD="10" usage() { - echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd] [-I iterations]" 1>&2 + echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd] [-I iterations] [-p procent threshold]" 1>&2 exit 1 } -while getopts ":i:s:t:D:w:I:" opt; do +while getopts ":i:s:t:D:w:I:p:" opt; do case "${opt}" in i) INTERVAL="${OPTARG}" ;; s) STEP="${OPTARG}" ;; @@ -31,6 +32,7 @@ while getopts ":i:s:t:D:w:I:" opt; do D) DURATION="${OPTARG}" ;; w) BACKGROUND_CMD="${OPTARG}" ;; I) ITERATIONS="${OPTARG}" ;; + p) PTHRESHOLD="${OPTARG}" ;; *) usage ;; esac done @@ -84,10 +86,11 @@ if [ "${ITERATIONS}" -gt 2 ]; then # Find the minimum latency min_latency=$(sort -n "${max_latencies_file}" | head -n1) - threshold=$(echo "$min_latency * 1.10" | bc -l) + echo "PTHRESHOLD: ${PTHRESHOLD}" + threshold=$(echo "$min_latency * (1.${PTHRESHOLD})" | bc -l) echo "Minimum max latency: $min_latency" - echo "Threshold (min * 1.10): $threshold" + echo "Threshold: $threshold in procent 1.$PTHRESHOLD" # Count how many latencies exceed threshold fail_count=0 @@ -102,7 +105,7 @@ if [ "${ITERATIONS}" -gt 2 ]; then echo "Max allowed failures: $fail_limit" echo "Actual failures: $fail_count" - echo "Number of max latencies above 110% of min: $fail_count" + echo "Number of max latencies above 1.${PTHRESHOLD}% of min: $fail_count" if [ "$fail_count" -ge "$fail_limit" ]; then report_fail "rt-tests-cyclicdeadline" diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.yaml b/automated/linux/cyclicdeadline/cyclicdeadline.yaml index 7405ee669..c8bec618c 100644 --- a/automated/linux/cyclicdeadline/cyclicdeadline.yaml +++ b/automated/linux/cyclicdeadline/cyclicdeadline.yaml @@ -46,10 +46,11 @@ params: ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" ITERATIONS: 1 + PTHRESHOLD: "10" run: steps: - cd ./automated/linux/cyclicdeadline/ - - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -I "${ITERATIONS}" -w "${BACKGROUND_CMD}" + - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -I "${ITERATIONS}" -w "${BACKGROUND_CMD}" -p "${PTHRESHOLD}" - ../../utils/upload-to-artifactorial.sh -a "output/cyclicdeadline.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt From a0b6641fc2fc4e340d9cc48d5361b8bde16cd4e7 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 28 Apr 2025 09:24:50 +0200 Subject: [PATCH 44/62] automated: linux: pi-stress: add generic pass/fail logic Enhance the pi-stress test to automatically determine a PASS or FAIL result based on collected inversion measurements. This change: - Adds logic to parse the inversion values from output. - Calculates the minimum observed inversion. - Sets a threshold at 110% of the minimum inversion. - Counts the number of runs that exceed this threshold. 
- Fails the test if more than half the iterations exceed the threshold.

This enables automated evaluation of pi-stress results without manual
inspection, making the test more suitable for continuous integration
environments.

Signed-off-by: Anders Roxell
---
 automated/linux/pi-stress/pi-stress.sh | 42 ++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh
index dfa935f6f..892be6e76 100755
--- a/automated/linux/pi-stress/pi-stress.sh
+++ b/automated/linux/pi-stress/pi-stress.sh
@@ -76,3 +76,45 @@ for i in $(seq ${ITERATIONS}); do
     fi
     cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}"
 done
+
+if [ "${ITERATIONS}" -gt 2 ]; then
+    max_inversions_file="${OUTPUT}/max_inversions.txt"
+
+    # Extract all inversion values into a file
+    grep "inversion" "${RESULT_FILE}" | grep "^iteration-" | awk '{ print $(NF-1) }' |tee "${max_inversions_file}"
+
+    if [ ! -s "${max_inversions_file}" ]; then
+        echo "No inversion values found!"
+        report_fail "rt-tests-pi-stress"
+        exit 1
+    fi
+
+    # Find the minimum inversion
+    min_inversion=$(sort -n "${max_inversions_file}" | head -n1)
+
+    threshold=$(echo "$min_inversion * 1.10" | bc -l)
+
+    echo "Minimum max inversion: $min_inversion"
+    echo "Threshold (min * 1.10): $threshold"
+
+    # Count how many inversions exceed threshold
+    fail_count=0
+    while read -r val; do
+        is_greater=$(echo "$val > $threshold" | bc -l)
+        if [ "$is_greater" -eq 1 ]; then
+            fail_count=$((fail_count + 1))
+        fi
+    done < "${max_inversions_file}"
+
+    fail_limit=$((ITERATIONS / 2))
+
+    echo "Max allowed failures: $fail_limit"
+    echo "Actual failures: $fail_count"
+    echo "Number of max inversions above 110% of min: $fail_count"
+
+    if [ "$fail_count" -ge "$fail_limit" ]; then
+        report_fail "rt-tests-pi-stress"
+    else
+        report_pass "rt-tests-pi-stress"
+    fi
+fi

From d45e326b3aad4d4ac15e497035e9c5f0857f1f98 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 29 Apr 2025 09:23:28 +0200
Subject: [PATCH 45/62] automated: linux: pi-stress: add pthreshold variable

Adds a `pthreshold` field to the pi-stress test configuration. The
`pthreshold` parameter allows users to define a performance threshold
for the maximum acceptable latency, enabling more flexible validation
criteria. If the maximum latency exceeds the configured `pthreshold`,
the test will fail, providing an additional mechanism for automated
performance evaluation.
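A hypothetical invocation with five iterations and a 15% threshold
(all values are examples only):

    ./pi-stress.sh -D 2m -i 5 -p 15
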
Signed-off-by: Anders Roxell --- automated/linux/pi-stress/pi-stress.sh | 13 ++++++++----- automated/linux/pi-stress/pi-stress.yaml | 3 ++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh index 892be6e76..43234c123 100755 --- a/automated/linux/pi-stress/pi-stress.sh +++ b/automated/linux/pi-stress/pi-stress.sh @@ -17,19 +17,21 @@ MLOCKALL="false" RR="false" BACKGROUND_CMD="" ITERATIONS=1 +PTHRESHOLD="10" usage() { - echo "Usage: $0 [-D runtime] [-m ] [-r ] [-w background_cmd] [-i iterations]" 1>&2 + echo "Usage: $0 [-D runtime] [-m ] [-r ] [-w background_cmd] [-i iterations] [-p procent threshold]" 1>&2 exit 1 } -while getopts ":D:m:r:w:i:" opt; do +while getopts ":D:m:r:w:i:p:" opt; do case "${opt}" in D) DURATION="${OPTARG}" ;; m) MLOCKALL="${OPTARG}" ;; r) RR="${OPTARG}" ;; w) BACKGROUND_CMD="${OPTARG}" ;; i) ITERATIONS="${OPTARG}" ;; + p) PTHRESHOLD="${OPTARG}" ;; *) usage ;; esac done @@ -92,10 +94,11 @@ if [ "${ITERATIONS}" -gt 2 ]; then # Find the minimum inversion min_inversion=$(sort -n "${max_inversions_file}" | head -n1) - threshold=$(echo "$min_inversion * 1.10" | bc -l) + echo "PTHRESHOLD: ${PTHRESHOLD}" + threshold=$(echo "$min_inversion * (1.${PTHRESHOLD})" | bc -l) echo "Minimum max inversion: $min_inversion" - echo "Threshold (min * 1.10): $threshold" + echo "Threshold: $threshold in procent 1.$PTHRESHOLD" # Count how many inversions exceed threshold fail_count=0 @@ -110,7 +113,7 @@ if [ "${ITERATIONS}" -gt 2 ]; then echo "Max allowed failures: $fail_limit" echo "Actual failures: $fail_count" - echo "Number of max inversions above 110% of min: $fail_count" + echo "Number of max inversions above 1.${PTHRESHOLD}% of min: $fail_count" if [ "$fail_count" -ge "$fail_limit" ]; then report_fail "rt-tests-pi-stress" diff --git a/automated/linux/pi-stress/pi-stress.yaml b/automated/linux/pi-stress/pi-stress.yaml index 2c553cae2..3081844a5 100644 --- a/automated/linux/pi-stress/pi-stress.yaml +++ b/automated/linux/pi-stress/pi-stress.yaml @@ -41,10 +41,11 @@ params: ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" ITERATIONS: 1 + PTHRESHOLD: "10" run: steps: - cd automated/linux/pi-stress - - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" + - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" -p "${PTHRESHOLD}" - ../../utils/upload-to-artifactorial.sh -a "output/pi-stress.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt From a7b173b6baac847c3dbc0c34bc871f8180a17f10 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 28 Apr 2025 12:24:19 +0200 Subject: [PATCH 46/62] automated: linux: modules: scan dmesg for errors Add a helper function to scan dmesg after each module insert and remove, looking for common kernel error signatures like "BUG:", "Oops:", "WARNING:", and "Call Trace:". This improves early detection of subtle regressions during module load/unload testing that may otherwise go unnoticed. 
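The scan itself is a single pipeline whose exit status drives the
pass/fail report; shown here in isolation:

    # levels 0-5 cover emerg through notice; any matching signature
    # fails the step
    dmesg -l 0,1,2,3,4,5 | grep -Ei "BUG:|WARNING:|Oops:|Call Trace:"
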
Signed-off-by: Anders Roxell --- automated/linux/modules/modules.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/automated/linux/modules/modules.sh b/automated/linux/modules/modules.sh index b6e5e8189..9beefa509 100755 --- a/automated/linux/modules/modules.sh +++ b/automated/linux/modules/modules.sh @@ -76,6 +76,11 @@ report() { fi } +scan_dmesg_for_errors() { + echo "=== Scanning dmesg for errors ===" + dmesg -l 0,1,2,3,4,5 | grep -Ei "BUG:|WARNING:|Oops:|Call Trace:" && report_fail "dmesg_error_scan" || report_pass "dmesg_error_scan" +} + run () { for module in ${MODULES_LIST}; do # don't insert/remove modules that is already inserted. @@ -86,8 +91,10 @@ run () { echo echo "modinfo ${module}" modinfo "${module}" + scan_dmesg_for_errors + report "--remove" "${module}" "remove" "${num}" - dmesg -l 0,1,2,3,4,5 + scan_dmesg_for_errors done fi done From e6199bd7d16dd57a8d24b2a6da2e14b031f6d523 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 28 Apr 2025 12:25:03 +0200 Subject: [PATCH 47/62] automated: linux: modules: check for stuck modules Introduce a helper to verify that a module is properly unloaded after modprobe --remove. If a module remains listed in lsmod, it indicates a stuck reference count and a possible kernel bug. This improves the robustness of module load/unload testing by catching incomplete removals. Signed-off-by: Anders Roxell --- automated/linux/modules/modules.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/automated/linux/modules/modules.sh b/automated/linux/modules/modules.sh index 9beefa509..919ff21c3 100755 --- a/automated/linux/modules/modules.sh +++ b/automated/linux/modules/modules.sh @@ -81,6 +81,16 @@ scan_dmesg_for_errors() { dmesg -l 0,1,2,3,4,5 | grep -Ei "BUG:|WARNING:|Oops:|Call Trace:" && report_fail "dmesg_error_scan" || report_pass "dmesg_error_scan" } +check_module_unloaded() { + local _module="$1" + if lsmod | grep "^${_module} " > /dev/null; then + echo "Module ${_module} still loaded after removal!" + report_fail "module_stuck_${_module}" + else + report_pass "module_unloaded_${_module}" + fi +} + run () { for module in ${MODULES_LIST}; do # don't insert/remove modules that is already inserted. @@ -95,6 +105,8 @@ run () { report "--remove" "${module}" "remove" "${num}" scan_dmesg_for_errors + + check_module_unloaded "${module}" done fi done From ad114aacceae772d921b1a73c3df03f03b4c2fd4 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 28 Apr 2025 12:25:26 +0200 Subject: [PATCH 48/62] automated: linux: modules: add kmemleak scan Add a final memory leak detection pass using kmemleak if it is enabled in the running kernel. Trigger a kmemleak scan after all module load/unload cycles and report any leaked memory objects. This helps detect regressions involving memory leaks caused by module insert/remove operations. Signed-off-by: Anders Roxell --- automated/linux/modules/modules.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/automated/linux/modules/modules.sh b/automated/linux/modules/modules.sh index 919ff21c3..2e407fde6 100755 --- a/automated/linux/modules/modules.sh +++ b/automated/linux/modules/modules.sh @@ -91,6 +91,23 @@ check_module_unloaded() { fi } +kmemleak_scan() { + if [ -e /sys/kernel/debug/kmemleak ]; then + echo "Triggering kmemleak scan..." + echo scan > /sys/kernel/debug/kmemleak + sleep 5 + if grep -q . 
/sys/kernel/debug/kmemleak; then + echo "Potential memory leaks detected:" + cat /sys/kernel/debug/kmemleak + report_fail "kmemleak_detected" + else + report_pass "kmemleak_no_leaks" + fi + else + echo "kmemleak not available, skipping scan." + fi +} + run () { for module in ${MODULES_LIST}; do # don't insert/remove modules that is already inserted. @@ -119,3 +136,4 @@ info_msg "Output directory: ${OUTPUT}" info_msg "About to run load/unload kernel modules ..." get_modules_list run +kmemleak_scan From 25fd951c72f2d23066c67b2f31bfc37821e44315 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 28 Apr 2025 13:50:31 +0200 Subject: [PATCH 49/62] automated: linux: modules: fix get_modules_list fallback logic Fix get_modules_list to properly handle the case where both MODULES_LIST and MODULES_SUBDIRS are empty. Previously, an empty MODULES_SUBDIRS resulted in an invalid grep expression, causing no modules to be selected. With this change, the script defaults to using all modules listed in modules.order if no specific list or subdirectory is provided. Also ensure the skiplist is optional and handled safely. Signed-off-by: Anders Roxell --- automated/linux/modules/modules.sh | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/automated/linux/modules/modules.sh b/automated/linux/modules/modules.sh index 2e407fde6..158733801 100755 --- a/automated/linux/modules/modules.sh +++ b/automated/linux/modules/modules.sh @@ -40,10 +40,21 @@ done get_modules_list() { if [ -z "${MODULES_LIST}" ]; then - subdir=$(echo "${MODULES_SUBDIRS}" | tr ' ' '|') - skiplist=$(echo "${SKIPLIST}" | tr ' ' '|') - grep -E "kernel/(${subdir})" /lib/modules/"$(uname -r)"/modules.order | tee /tmp/find_modules.txt - grep -E -v "(${skiplist})" /tmp/find_modules.txt | tee /tmp/modules_to_run.txt + if [ -n "${MODULES_SUBDIRS}" ]; then + subdir=$(echo "${MODULES_SUBDIRS}" | tr ' ' '|') + grep -E "kernel/(${subdir})" /lib/modules/"$(uname -r)"/modules.order > /tmp/find_modules.txt + else + # No subdir given, default to all modules + cat /lib/modules/"$(uname -r)"/modules.order > /tmp/find_modules.txt + fi + + if [ -n "${SKIPLIST}" ]; then + skiplist=$(echo "${SKIPLIST}" | tr ' ' '|') + grep -E -v "(${skiplist})" /tmp/find_modules.txt > /tmp/modules_to_run.txt + else + cp /tmp/find_modules.txt /tmp/modules_to_run.txt + fi + split --verbose --numeric-suffixes=1 -n l/"${SHARD_INDEX}"/"${SHARD_NUMBER}" /tmp/modules_to_run.txt > /tmp/shardfile echo "============== Tests to run ===============" cat /tmp/shardfile From a52e64b90dc22c1dc4c3b528a8f0a692329421e4 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Wed, 7 May 2025 14:17:51 +0200 Subject: [PATCH 50/62] automated: linux: pi-stress: fix threshold The pi-stress results shouldn't be based on the minimum value, it should be -X% from the max_inversion. Anything less than threshold is a fail. 
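With illustrative numbers, the new rule reads:

    PTHRESHOLD=10
    max_inversion=2000
    # threshold sits PTHRESHOLD percent below the largest inversion count
    threshold=$(echo "$max_inversion * (1 - (${PTHRESHOLD}/100))" | bc -l)
    echo "$threshold"    # 1800.00...; iterations below this count as failures
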
Signed-off-by: Anders Roxell --- automated/linux/pi-stress/pi-stress.sh | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh index 43234c123..362f8ce95 100755 --- a/automated/linux/pi-stress/pi-stress.sh +++ b/automated/linux/pi-stress/pi-stress.sh @@ -92,19 +92,15 @@ if [ "${ITERATIONS}" -gt 2 ]; then fi # Find the minimum inversion - min_inversion=$(sort -n "${max_inversions_file}" | head -n1) + max_inversion=$(sort -n "${max_inversions_file}" | tail -n1) + echo "Calculated max_inversion: ${max_inversion}" + threshold=$(echo "$max_inversion * (1 - (${PTHRESHOLD}/100))" | bc -l) + echo "Threshold: $threshold (i.e., within -${PTHRESHOLD}%)" - echo "PTHRESHOLD: ${PTHRESHOLD}" - threshold=$(echo "$min_inversion * (1.${PTHRESHOLD})" | bc -l) - - echo "Minimum max inversion: $min_inversion" - echo "Threshold: $threshold in procent 1.$PTHRESHOLD" - - # Count how many inversions exceed threshold fail_count=0 while read -r val; do - is_greater=$(echo "$val > $threshold" | bc -l) - if [ "$is_greater" -eq 1 ]; then + is_less=$(echo "$val < $threshold" | bc -l) + if [ "$is_less" -eq 1 ]; then fail_count=$((fail_count + 1)) fi done < "${max_inversions_file}" @@ -113,7 +109,6 @@ if [ "${ITERATIONS}" -gt 2 ]; then echo "Max allowed failures: $fail_limit" echo "Actual failures: $fail_count" - echo "Number of max inversions above 1.${PTHRESHOLD}% of min: $fail_count" if [ "$fail_count" -ge "$fail_limit" ]; then report_fail "rt-tests-pi-stress" From 6662ecf825c4f5da2d6dded6bacf78a90eb0ffb7 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Wed, 7 May 2025 09:11:08 +0000 Subject: [PATCH 51/62] automated: linux: pi-stress: allow user-specified baseline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, the pi-stress script dynamically computed the baseline (minimum inversion value) across all iterations. This patch adds support for a new `-x` option that lets users manually specify a fixed `user_baseline` value. Using a fixed baseline is preferred over percentage-based thresholds in this context, as it offers more consistent and predictable evaluation—especially in controlled benchmarking environments or when comparing results across different systems and runs. Percentages can be misleading if the dynamic baseline fluctuates due to noise or system conditions. This change improves flexibility and repeatability in post-processing and performance analysis. 
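A hypothetical invocation pinning the baseline to 4000 inversions
(duration and iteration count are examples only):

    ./pi-stress.sh -D 2m -i 5 -x 4000
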
Signed-off-by: Anders Roxell --- automated/linux/pi-stress/pi-stress.sh | 21 ++++++++++++--------- automated/linux/pi-stress/pi-stress.yaml | 4 ++-- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh index 362f8ce95..86a9b9499 100755 --- a/automated/linux/pi-stress/pi-stress.sh +++ b/automated/linux/pi-stress/pi-stress.sh @@ -17,21 +17,21 @@ MLOCKALL="false" RR="false" BACKGROUND_CMD="" ITERATIONS=1 -PTHRESHOLD="10" +USER_BASELINE="" usage() { - echo "Usage: $0 [-D runtime] [-m ] [-r ] [-w background_cmd] [-i iterations] [-p procent threshold]" 1>&2 + echo "Usage: $0 [-D runtime] [-m ] [-r ] [-w background_cmd] [-i iterations] [-x user baseline]" 1>&2 exit 1 } -while getopts ":D:m:r:w:i:p:" opt; do +while getopts ":D:m:r:w:i:x:" opt; do case "${opt}" in D) DURATION="${OPTARG}" ;; m) MLOCKALL="${OPTARG}" ;; r) RR="${OPTARG}" ;; w) BACKGROUND_CMD="${OPTARG}" ;; i) ITERATIONS="${OPTARG}" ;; - p) PTHRESHOLD="${OPTARG}" ;; + x) USER_BASELINE="${OPTARG}" ;; *) usage ;; esac done @@ -92,14 +92,17 @@ if [ "${ITERATIONS}" -gt 2 ]; then fi # Find the minimum inversion - max_inversion=$(sort -n "${max_inversions_file}" | tail -n1) - echo "Calculated max_inversion: ${max_inversion}" - threshold=$(echo "$max_inversion * (1 - (${PTHRESHOLD}/100))" | bc -l) - echo "Threshold: $threshold (i.e., within -${PTHRESHOLD}%)" + if [ -n "${USER_BASELINE}" ]; then + max_inversion="${USER_BASELINE}" + echo "Using user-provided user_baseline: ${max_inversion}" + else + max_inversion=$(sort -n "${max_inversions_file}" | tail -n1) + echo "Calculated max_inversion: ${max_inversion}" + fi fail_count=0 while read -r val; do - is_less=$(echo "$val < $threshold" | bc -l) + is_less=$(echo "$val < $max_inversion" | bc -l) if [ "$is_less" -eq 1 ]; then fail_count=$((fail_count + 1)) fi diff --git a/automated/linux/pi-stress/pi-stress.yaml b/automated/linux/pi-stress/pi-stress.yaml index 3081844a5..2215964e2 100644 --- a/automated/linux/pi-stress/pi-stress.yaml +++ b/automated/linux/pi-stress/pi-stress.yaml @@ -41,11 +41,11 @@ params: ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" ITERATIONS: 1 - PTHRESHOLD: "10" + USER_BASELINE: "" run: steps: - cd automated/linux/pi-stress - - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" -p "${PTHRESHOLD}" + - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" -x "${USER_BASELINE}" - ../../utils/upload-to-artifactorial.sh -a "output/pi-stress.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt From 975b3d4bc03401ea9c5c6e078e1f3ac601561121 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Wed, 7 May 2025 17:00:43 +0200 Subject: [PATCH 52/62] automated: linux: cyclicdeadline: add user-specified baseline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, the cyclicdeadline script dynamically computed the minimum latency value across all iterations. This patch introduces a new `-x` option that enables users to specify a fixed `user_baseline` value instead. Using a fixed baseline is preferred over percentage-based thresholds in this context, as it offers more consistent and predictable evaluation—especially in controlled benchmarking environments or when comparing results across different systems and runs. 
Percentages can be misleading if the dynamic baseline fluctuates due
to noise or system conditions. This change improves flexibility and
repeatability in post-processing and performance analysis.

Signed-off-by: Anders Roxell
---
 .../linux/cyclicdeadline/cyclicdeadline.sh    | 28 +++++++++----------
 .../linux/cyclicdeadline/cyclicdeadline.yaml  |  4 +--
 2 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh
index 1df16d8e6..e505ea025 100755
--- a/automated/linux/cyclicdeadline/cyclicdeadline.sh
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh
@@ -17,14 +17,14 @@ THREADS="1"
 DURATION="1m"
 BACKGROUND_CMD=""
 ITERATIONS=1
-PTHRESHOLD="10"
+USER_BASELINE=""
 
 usage() {
-    echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd] [-I iterations] [-p procent threshold]" 1>&2
+    echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd] [-I iterations] [-x user_baseline]" 1>&2
     exit 1
 }
 
-while getopts ":i:s:t:D:w:I:p:" opt; do
+while getopts ":i:s:t:D:w:I:x:" opt; do
     case "${opt}" in
         i) INTERVAL="${OPTARG}" ;;
         s) STEP="${OPTARG}" ;;
@@ -32,7 +32,7 @@ while getopts ":i:s:t:D:w:I:x:" opt; do
         D) DURATION="${OPTARG}" ;;
         w) BACKGROUND_CMD="${OPTARG}" ;;
         I) ITERATIONS="${OPTARG}" ;;
-        p) PTHRESHOLD="${OPTARG}" ;;
+        x) USER_BASELINE="${OPTARG}" ;;
         *) usage ;;
     esac
 done
@@ -83,19 +83,17 @@ if [ "${ITERATIONS}" -gt 2 ]; then
         exit 1
     fi
 
-    # Find the minimum latency
-    min_latency=$(sort -n "${max_latencies_file}" | head -n1)
-
-    echo "PTHRESHOLD: ${PTHRESHOLD}"
-    threshold=$(echo "$min_latency * (1.${PTHRESHOLD})" | bc -l)
-
-    echo "Minimum max latency: $min_latency"
-    echo "Threshold: $threshold in procent 1.$PTHRESHOLD"
+    if [ -n "${USER_BASELINE}" ]; then
+        echo "Using user-provided baseline: ${USER_BASELINE}"
+        min_latency="${USER_BASELINE}"
+    else
+        # Find the minimum latency
+        min_latency=$(sort -n "${max_latencies_file}" | head -n1)
+    fi
 
-    # Count how many latencies exceed threshold
     fail_count=0
     while read -r val; do
-        is_greater=$(echo "$val > $threshold" | bc -l)
+        is_greater=$(echo "$val > $min_latency" | bc -l)
         if [ "$is_greater" -eq 1 ]; then
             fail_count=$((fail_count + 1))
         fi
@@ -105,7 +103,7 @@
 
     echo "Max allowed failures: $fail_limit"
     echo "Actual failures: $fail_count"
-    echo "Number of max latencies above 1.${PTHRESHOLD}% of min: $fail_count"
+    echo "Number of max latencies above baseline ($min_latency) : $fail_count"
 
     if [ "$fail_count" -ge "$fail_limit" ]; then
         report_fail "rt-tests-cyclicdeadline"
diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.yaml b/automated/linux/cyclicdeadline/cyclicdeadline.yaml
index c8bec618c..6b9de2de2 100644
--- a/automated/linux/cyclicdeadline/cyclicdeadline.yaml
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.yaml
@@ -46,11 +46,11 @@ params:
     ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/"
     ARTIFACTORIAL_TOKEN: ""
     ITERATIONS: 1
-    PTHRESHOLD: "10"
+    USER_BASELINE: ""
 
 run:
     steps:
        - cd ./automated/linux/cyclicdeadline/
-        - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -I "${ITERATIONS}" -w "${BACKGROUND_CMD}" -p "${PTHRESHOLD}"
+        - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -I "${ITERATIONS}" -w "${BACKGROUND_CMD}" -x "${USER_BASELINE}"
        - ../../utils/upload-to-artifactorial.sh -a "output/cyclicdeadline.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}"
        - ../../utils/send-to-lava.sh ./output/result.txt

From 249ed0ad77dc229862cc4ec2022f838856c9006b Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Thu, 15 May 2025 09:43:44 +0100
Subject: [PATCH 53/62] automated: lib: modify check_return to support xfail

xfail translates to "expected failure". This implementation assumes
that an exit code different from 0 means failure. The patch modifies
check_return to accept an additional parameter, "xfail". If the
parameter is set, exit code 0 will be considered "fail" and an exit
code different from 0 will be considered "pass".

Signed-off-by: Milosz Wasilewski
---
 automated/lib/sh-test-lib | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/automated/lib/sh-test-lib b/automated/lib/sh-test-lib
index bce0e716e..0fa0d0eab 100755
--- a/automated/lib/sh-test-lib
+++ b/automated/lib/sh-test-lib
@@ -102,15 +102,16 @@ exit_on_skip() {
 check_return() {
     # shellcheck disable=SC2039
     local exit_code="$?"
-    [ "$#" -ne 1 ] && error_msg "Usage: check_return test_case"
+    [ "$#" -lt 1 ] && error_msg "Usage: check_return test_case [xfail]"
     # shellcheck disable=SC2039
     local test_case="$1"
+    local xfail="${2:-}"
 
-    if [ "${exit_code}" -ne 0 ]; then
-        echo "${test_case} fail" | tee -a "${RESULT_FILE}"
+    if [ "${exit_code}" -ne 0 ] && [ -z "${xfail}" ]; then
+        report_fail "${test_case}"
         return "${exit_code}"
     else
-        echo "${test_case} pass" | tee -a "${RESULT_FILE}"
+        report_pass "${test_case}"
         return 0
     fi
 }

From 302686d35ec1593464caf72a616e50217c8bbed5 Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Thu, 15 May 2025 09:59:47 +0100
Subject: [PATCH 54/62] automated: lib: fix shellcheck warnings

Remove a return that would never be reached after calling error_msg
and fix an unquoted string variable.

Signed-off-by: Milosz Wasilewski
---
 automated/lib/sh-test-lib | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/automated/lib/sh-test-lib b/automated/lib/sh-test-lib
index 0fa0d0eab..c38bf4d25 100755
--- a/automated/lib/sh-test-lib
+++ b/automated/lib/sh-test-lib
@@ -238,7 +238,7 @@ dist_name() {
     fi
 
     # convert dist to lower case
-    dist=$(echo ${dist} | tr '[:upper:]' '[:lower:]')
+    dist=$(echo "${dist}" | tr '[:upper:]' '[:lower:]')
     case "${dist}" in
         rpb*) dist="oe-rpb" ;;
     esac
@@ -337,7 +337,6 @@ validate_check_sum() {
     if [ "$#" -ne 2 ]; then
         warn_msg "The number of parameters should be 2"
         error_msg "Usage: validate_check_sum filename known_sha256sum"
-        return 1
     fi
     # shellcheck disable=SC2039
     local OUTPUT_FILE_NAME="$1"

From e7254673cb07fd9678916e0c0a2d20b6db402bae Mon Sep 17 00:00:00 2001
From: Milosz Wasilewski
Date: Mon, 12 May 2025 10:58:42 +0100
Subject: [PATCH 55/62] automated: linux: add more options to docker
 networking

Add "host" and "none" network options to the docker-networking tests.
This makes it possible to cover all the simple docker networking
setups. All tests can be executed in one run by passing the -n option
to the script invocation. Example:

docker-networking.sh -n "bridge host"

Test names were appended with ${NETWORK} to indicate which network
type is tested. The default is "bridge".
Signed-off-by: Milosz Wasilewski --- .../docker-networking/docker-networking.sh | 104 ++++++++++++------ .../docker-networking/docker-networking.yaml | 6 +- 2 files changed, 75 insertions(+), 35 deletions(-) diff --git a/automated/linux/docker-networking/docker-networking.sh b/automated/linux/docker-networking/docker-networking.sh index 51c6d6ce9..b5c45abd0 100755 --- a/automated/linux/docker-networking/docker-networking.sh +++ b/automated/linux/docker-networking/docker-networking.sh @@ -7,16 +7,23 @@ RESULT_FILE="${OUTPUT}/result.txt" export RESULT_FILE IMAGE="alpine:latest" SKIP_INSTALL="True" +NETWORK_TYPE="bridge" +HOST_INTERFACE="eth0" usage() { - echo "$0 [-i ] [-s true|false]" 1>&2 + echo "$0 [-i ] [-n ] [-s true|false] [-b eth0]" 1>&2 + echo " -n option can be a combination of bridge, host and none." 1>&2 + echo " Options should be space separated." 1>&2 + echo " In case there are more than one, all tests will be executed." 1>&2 exit 1 } -while getopts "i:s:h" o; do +while getopts "i:s:n:b:h" o; do case "$o" in i) IMAGE="${OPTARG}" ;; s) SKIP_INSTALL="${OPTARG}";; + n) NETWORK_TYPE="${OPTARG}";; + b) HOST_INTERFACE="${OPTARG}";; h|*) usage ;; esac done @@ -43,48 +50,77 @@ install_docker() { esac } +remove_one_from_skiplist() { + echo "$1" | cut -f2- -d" " +} + if [ "${SKIP_INSTALL}" = "True" ] || [ "${SKIP_INSTALL}" = "true" ]; then info_msg "Installation skipped" else install_docker fi + +HOST_IP=$(ip addr show dev "${HOST_INTERFACE}" | grep "inet " | awk '{ print $2 }' | awk -F "/" '{print $1}') + # verify that docker is available -skip_list="docker-network-list docker-start-container docker-network-inspect docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" +for NETWORK in ${NETWORK_TYPE}; do + skip_list="${skip_list} docker-network-list-${NETWORK} docker-start-container-${NETWORK} docker-network-inspect-${NETWORK} docker-network-${NETWORK} ping-container-test-${NETWORK} docker-kill-container-${NETWORK} docker-ping-host-network-${NETWORK}" +done docker --version exit_on_fail "docker-version" "${skip_list}" -# check if bridge network is present -skip_list="docker-start-container docker-network-inspect docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" -docker network ls -f name=bridge | grep bridge -exit_on_fail "docker-network-list" "${skip_list}" - -# start simple alpine container -skip_list="docker-network-inspect docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" -docker run --name ping_test_container --rm -d "${IMAGE}" /bin/sleep 90 -exit_on_fail "docker-start-container" "${skip_list}" - -# container should join bridge network -skip_list="docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" -DOCKER_INSPECT=$(docker network inspect bridge) -exit_on_fail "docker-network-inspect" "${skip_list}" - -echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][]' -IP_ADDR=$(echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][] | select(.Name=="ping_test_container") | .IPv4Address | split("/")[0]') -echo "${IP_ADDR}" -if [ -n "$IP_ADDR" ]; then - report_pass "docker-network-bridge" - eval "ping -c4 $IP_ADDR" - check_return "ping-container-test" -else - report_fail "docker-network-bridge" - report_skip "ping-container-test" -fi +for NETWORK in ${NETWORK_TYPE}; do + # check if bridge network is present + skip_list=$(remove_one_from_skiplist "${skip_list}") + docker network ls -f name="${NETWORK}" | grep 
"${NETWORK}" + exit_on_fail "docker-network-list" "${skip_list}" + + # start simple alpine container + skip_list=$(remove_one_from_skiplist "${skip_list}") + docker run --name ping_test_container --network "${NETWORK}" --rm -d "${IMAGE}" /bin/sleep 90 + exit_on_fail "docker-start-container" "${skip_list}" -docker kill ping_test_container -check_return "docker-kill-container" + # container should join NETWORK network + skip_list=$(remove_one_from_skiplist "${skip_list}") + DOCKER_INSPECT=$(docker network inspect "${NETWORK}") + exit_on_fail "docker-network-inspect-${NETWORK}" "${skip_list}" -# IPv4 try pinging localhost from container with host networking -docker run --name ping_localhost_host_network --rm -d "${IMAGE}" ping -4 -c 4 localhost -check_return "docker-ping-localhost-host-network" + skip_list=$(remove_one_from_skiplist "${skip_list}") + if [ "${NETWORK}" = "bridge" ]; then + echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][]' + IP_ADDR=$(echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][] | select(.Name=="ping_test_container") | .IPv4Address | split("/")[0]') + echo "${IP_ADDR}" + if [ -n "$IP_ADDR" ]; then + report_pass "docker-network-${NETWORK}" + eval "ping -c4 $IP_ADDR" + skip_list=$(remove_one_from_skiplist "${skip_list}") + check_return "ping-container-test-${NETWORK}" + else + report_fail "docker-network-${NETWORK}" + skip_list=$(remove_one_from_skiplist "${skip_list}") + report_skip "ping-container-test-${NETWORK}" + fi + else + report_pass "docker-network-${NETWORK}" + skip_list=$(remove_one_from_skiplist "${skip_list}") + report_skip "ping-container-test-${NETWORK}" + fi + skip_list=$(remove_one_from_skiplist "${skip_list}") + docker kill ping_test_container + check_return "docker-kill-container-${NETWORK}" + + skip_list=$(remove_one_from_skiplist "${skip_list}") + if [ -n "${HOST_IP}" ]; then + xfail="" + if [ "${NETWORK}" = none ]; then + # ping should fail with disabled networking + xfail="xfail" + fi + docker run --name ping_localhost_host_network --network "${NETWORK}" --rm "${IMAGE}" ping -4 -c 4 "${HOST_IP}" + check_return "docker-ping-host-network-${NETWORK}" "${xfail}" + else + report_skip "docker-ping-host-network-${NETWORK}" + fi +done exit 0 diff --git a/automated/linux/docker-networking/docker-networking.yaml b/automated/linux/docker-networking/docker-networking.yaml index 9a8fda39e..960def16c 100644 --- a/automated/linux/docker-networking/docker-networking.yaml +++ b/automated/linux/docker-networking/docker-networking.yaml @@ -26,9 +26,13 @@ params: # Docker image. IMAGE: "alpine:latest" SKIP_INSTALL: "true" + # NETWORK can be any of "bridge", "host", "none" + # or it can be a combination of them, like "bridge host" + NETWORK: "bridge" + HOST_INTERFACE: "eth0" run: steps: - cd ./automated/linux/docker-networking/ - - ./docker-networking.sh -i "${IMAGE}" -s "${SKIP_INSTALL}" + - ./docker-networking.sh -i "${IMAGE}" -s "${SKIP_INSTALL}" -n "${NETWORK}" -b "${HOST_INTERFACE}" - ../../utils/send-to-lava.sh ./output/result.txt From 81eaa219aeb12f4457aeeb6cbb29d2330e95ca34 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 28 May 2025 13:24:13 +0100 Subject: [PATCH 56/62] automated: linux: kselftest: Deduplicate test names with minuses Currently the KTAP output parser deletes any '-' it sees in the test name when slugifying it. 
This means that in a case like the arm64 breakpoints test two distinct tests end up squashed into the same name since the only difference in name is a minus sign: # ok 1 Test size = 1 write offset = 0 watchpoint offset = -1 # ok 3 Test size = 1 write offset = 0 watchpoint offset = 1 becomes: breakpoints_breakpoint_test_arm64_Test_size_1_write_offset_0_watchpoint_offset_1 pass breakpoints_breakpoint_test_arm64_Test_size_1_write_offset_0_watchpoint_offset_1_dup2 pass which isn't ideal. Fix this by converting the '-' to an '_' instead. Signed-off-by: Mark Brown --- automated/linux/kselftest/parse-output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automated/linux/kselftest/parse-output.py b/automated/linux/kselftest/parse-output.py index fa1c02606..21f20738c 100755 --- a/automated/linux/kselftest/parse-output.py +++ b/automated/linux/kselftest/parse-output.py @@ -8,7 +8,7 @@ def slugify(line): non_ascii_pattern = r"[^A-Za-z0-9_-]+" line = re.sub(r"\[\d{1,5}\]", "", line) return re.sub( - r"_-", "_", re.sub(r"(^_|_$)", "", re.sub(non_ascii_pattern, "_", line)) + r"_-", "__", re.sub(r"(^_|_$)", "", re.sub(non_ascii_pattern, "_", line)) ) From 737f62741b0909bdd6114fa2024050fd88f3fef3 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Wed, 12 Mar 2025 10:58:42 -0500 Subject: [PATCH 57/62] automated: linux: add tcpreplay testing framework This patch introduces an automated testing setup for tcpreplay-based traffic replay and analysis. The system consists of: 1. `tcpreplay.py`: A test runner that: - Creates and configures a TAP interface - Replays PCAP files using `tcpreplay` - Tracks results and produces a summary for each test - Handles both expected and unexpected results (e.g., xfail -> pass) 2. `generate_pcap.py`: A Scapy-based script to: - Generate a suite of PCAP files for functional and edge case testing - Include both valid packets and malformed/false-positive scenarios - Provide coverage for multiple protocols (TCP, UDP, ICMP, DNS, etc.) 
- Simulate problematic flows like fragmented packets and invalid flags Highlights: - xfail support: tests expected to fail are counted as 'pass' if they do fail - xpass detection: alerts if a known-broken test unexpectedly succeeds - Easy extensibility for new PCAPs and expectations Example test cases include: - TCP lifecycle (`tcp_basic`, `tcp_full_cycle`) - Bad flag scenarios (`bad_tcp_flags`) - Noise/overlap (`false_positive_overlap`) - Fragmentation and malformed headers Signed-off-by: Anders Roxell --- .../linux/tcpreplay/pcap/generate_pcap.py | 199 ++++++++++++++++++ automated/linux/tcpreplay/tcpreplay.py | 155 ++++++++++++++ automated/linux/tcpreplay/tcpreplay.yaml | 34 +++ 3 files changed, 388 insertions(+) create mode 100755 automated/linux/tcpreplay/pcap/generate_pcap.py create mode 100755 automated/linux/tcpreplay/tcpreplay.py create mode 100644 automated/linux/tcpreplay/tcpreplay.yaml diff --git a/automated/linux/tcpreplay/pcap/generate_pcap.py b/automated/linux/tcpreplay/pcap/generate_pcap.py new file mode 100755 index 000000000..1666b1b49 --- /dev/null +++ b/automated/linux/tcpreplay/pcap/generate_pcap.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +# vim: set ts=8 sw=4 sts=4 et tw=80 fileencoding=utf-8 : +from scapy.all import ( + Ether, + IP, + TCP, + UDP, + ICMP, + DNS, + DNSQR, + DNSRR, + wrpcap, + Raw, + fragment, +) + + +test_expectations = { + "tcp_basic.pcap": "pass", + "tcp_data.pcap": "pass", + "udp_packet.pcap": "pass", + "icmp_ping.pcap": "pass", + "fragmented.pcap": "pass", + "tcp_rst.pcap": "pass", + "tcp_full_cycle.pcap": "pass", + "dns_query_response.pcap": "pass", + "bad_tcp_flags.pcap": "xfail", + "tcp_multistream.pcap": "pass", + "false_positive_noise.pcap": "pass", + "false_positive_overlap.pcap": "pass", + "false_positive_icmp_flood.pcap": "xfail", +} + + +def tcp_basic(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + tcp_syn = TCP(sport=12345, dport=80, flags="S", seq=1000) + tcp_synack = TCP(sport=80, dport=12345, flags="SA", seq=2000, ack=1001) + tcp_ack = TCP(sport=12345, dport=80, flags="A", seq=1001, ack=2001) + wrpcap( + "pcap/tcp_basic.pcap", + [Ether() / ip / tcp_syn, Ether() / ip / tcp_synack, Ether() / ip / tcp_ack], + ) + + +def tcp_data(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + tcp = TCP(sport=12345, dport=80, flags="PA", seq=1, ack=1) + data = Raw(load="GET / HTTP/1.1\r\nHost: test\r\n\r\n") + wrpcap("pcap/tcp_data.pcap", [Ether() / ip / tcp / data]) + + +def udp_packet(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + udp = UDP(sport=1234, dport=1234) + wrpcap("pcap/udp_packet.pcap", [Ether() / ip / udp / Raw(load="hello")]) + + +def icmp_ping(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + wrpcap( + "pcap/icmp_ping.pcap", + [ + Ether() / ip / ICMP(type="echo-request") / b"ping", + Ether() / ip / ICMP(type="echo-reply") / b"pong", + ], + ) + + +def fragmented(): + pkt = IP(dst="10.0.0.1") / UDP(sport=1111, dport=2222) / Raw(load="X" * 3000) + frags = fragment(pkt, fragsize=500) + wrpcap("pcap/fragmented.pcap", [Ether() / f for f in frags]) + + +def tcp_rst(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + rst = TCP(sport=12345, dport=80, flags="R", seq=1234) + wrpcap("pcap/tcp_rst.pcap", [Ether() / ip / rst]) + + +def tcp_full_cycle(): + eth = Ether(src="00:11:22:33:44:55", dst="66:77:88:99:aa:bb") + ip = IP(src="10.0.0.2", dst="10.0.0.1") + packets = [ + eth / ip / TCP(sport=12345, dport=80, flags="S", seq=1000), + eth / ip / TCP(sport=80, dport=12345, flags="SA", seq=2000, ack=1001), + eth / ip / TCP(sport=12345, dport=80, flags="A", seq=1001, 
ack=2001),
+        eth
+        / ip
+        / TCP(sport=12345, dport=80, flags="PA", seq=1001, ack=2001)
+        / b"GET / HTTP/1.1\r\nHost: test\r\n\r\n",
+        eth
+        / ip
+        / TCP(sport=80, dport=12345, flags="PA", seq=2001, ack=1025)
+        / b"HTTP/1.1 200 OK\r\n\r\nHi!",
+        eth / ip / TCP(sport=12345, dport=80, flags="FA", seq=1025, ack=2024),
+        eth / ip / TCP(sport=80, dport=12345, flags="FA", seq=2024, ack=1026),
+        eth / ip / TCP(sport=12345, dport=80, flags="A", seq=1026, ack=2025),
+    ]
+    wrpcap("pcap/tcp_full_cycle.pcap", packets)
+
+
+def dns_query_response():
+    eth = Ether()
+    ip = IP(src="10.0.0.2", dst="8.8.8.8")
+    query = UDP(sport=1234, dport=53) / DNS(
+        id=0xAAAA, qr=0, qd=DNSQR(qname="example.com")
+    )
+    reply = UDP(sport=53, dport=1234) / DNS(
+        id=0xAAAA,
+        qr=1,
+        qd=DNSQR(qname="example.com"),
+        an=DNSRR(rrname="example.com", rdata="93.184.216.34"),
+    )
+    wrpcap("pcap/dns_query_response.pcap", [eth / ip / query, eth / ip / reply])
+
+
+def bad_tcp_flags():
+    tcp = TCP(sport=1234, dport=80, flags="FPU", seq=1000)
+    pkt = Ether() / IP(src="10.0.0.2", dst="10.0.0.1") / tcp
+    pkt[TCP].chksum = 0xFFFF  # Force bad checksum
+    wrpcap("pcap/bad_tcp_flags.pcap", [pkt])
+
+
+def tcp_multistream():
+    eth = Ether()
+    streams = []
+    for i in range(3):
+        sport = 10000 + i
+        dst_port = 80
+        ip = IP(src="10.0.0.2", dst="10.0.0.1")
+        syn = TCP(sport=sport, dport=dst_port, flags="S", seq=1000 + i)
+        ack = TCP(sport=sport, dport=dst_port, flags="A", seq=1001 + i, ack=2001 + i)
+        data = (
+            TCP(sport=sport, dport=dst_port, flags="PA", seq=1001 + i, ack=2001 + i)
+            / f"GET /stream{i}".encode()
+        )
+        streams.extend([eth / ip / syn, eth / ip / ack, eth / ip / data])
+    wrpcap("pcap/tcp_multistream.pcap", streams)
+
+
+def false_positive_noise():
+    packets = []
+    for i in range(10):
+        pkt = (
+            Ether()
+            / IP(src=f"192.168.0.{i+10}", dst="10.0.0.1")
+            / UDP(sport=1234 + i, dport=5678)
+            / Raw(load="NOISE")
+        )
+        packets.append(pkt)
+    wrpcap("pcap/false_positive_noise.pcap", packets)
+
+
+def false_positive_overlap():
+    packets = []
+    for i in range(3):
+        ip = IP(src=f"10.0.0.{i+3}", dst="10.0.0.1")
+        tcp = TCP(sport=1000 + i, dport=80, flags="PA", seq=42 + i, ack=1) / Raw(
+            load=f"benign{i}"
+        )
+        packets.append(Ether() / ip / tcp)
+    wrpcap("pcap/false_positive_overlap.pcap", packets)
+
+
+def false_positive_icmp_flood():
+    packets = [
+        Ether()
+        / IP(src="1.2.3.4", dst="10.0.0.1")
+        / ICMP(type="echo-request")
+        / Raw(load="flood")
+        for _ in range(20)
+    ]
+    wrpcap("pcap/false_positive_icmp_flood.pcap", packets)
+
+
+def run_all():
+    import os
+
+    os.makedirs("pcap", exist_ok=True)
+    tcp_basic()
+    tcp_data()
+    udp_packet()
+    icmp_ping()
+    fragmented()
+    tcp_rst()
+    tcp_full_cycle()
+    dns_query_response()
+    bad_tcp_flags()
+    tcp_multistream()
+    false_positive_noise()
+    false_positive_overlap()
+    false_positive_icmp_flood()
+    print("All .pcap files generated in ./pcap/")
+
+
+if __name__ == "__main__":
+    run_all()
diff --git a/automated/linux/tcpreplay/tcpreplay.py b/automated/linux/tcpreplay/tcpreplay.py
new file mode 100755
index 000000000..dd709b84f
--- /dev/null
+++ b/automated/linux/tcpreplay/tcpreplay.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# vim: set ts=8 sw=4 sts=4 et tw=80 fileencoding=utf-8 :
+import argparse
+import glob
+import fcntl
+import os
+import shutil
+import struct
+import subprocess
+import sys
+import time
+
+
+def check_root():
+    if os.geteuid() != 0:
+        print("SKIP: Must be run as root to create TAP interfaces")
+        return False
+    return True
+
+
+def check_tcpreplay():
+    if not shutil.which("tcpreplay"):
+        print("SKIP: tcpreplay not found in PATH")
+        return False
+    return True
+
+
+def create_tap_interface(ifname):
+    try:
+        IFF_TAP = 0x0002
+        IFF_NO_PI = 0x1000
+        TUNSETIFF = 0x400454CA
+
+        tap_fd = os.open("/dev/net/tun", os.O_RDWR)
+        ifr = struct.pack("16sH", ifname.encode(), IFF_TAP | IFF_NO_PI)
+        fcntl.ioctl(tap_fd, TUNSETIFF, ifr)
+        return tap_fd
+    except Exception as e:
+        print(f"Error creating TAP interface: {e}")
+        return None
+
+
+def configure_interface(ifname, ipaddr, mask):
+    try:
+        subprocess.run(["ip", "link", "set", ifname, "up"], check=True)
+        subprocess.run(
+            ["ip", "addr", "add", f"{ipaddr}/{mask}", "dev", ifname], check=True
+        )
+        return True
+    except subprocess.CalledProcessError as e:
+        print(f"Failed to configure interface: {e}")
+        return False
+
+
+def cleanup_interface(ifname):
+    try:
+        subprocess.run(["ip", "link", "set", ifname, "down"], check=True)
+        print("cleanup_interface: pass")
+    except subprocess.CalledProcessError:
+        print("cleanup_interface: fail")
+
+
+def run_tcpreplay(ifname, pcap):
+    try:
+        subprocess.run(["tcpreplay", "--intf1", ifname, pcap], check=True)
+        print("run_tcpreplay: pass")
+        return True
+    except subprocess.CalledProcessError:
+        print("run_tcpreplay: fail")
+        return False
+
+
+def lava_report(name, result, output_file=None):
+    line = f"{name}: {result}"
+    print(line)
+    if output_file:
+        os.makedirs(os.path.dirname(output_file), exist_ok=True)
+        with open(output_file, "a") as f:
+            f.write(line + "\n")
+
+
+def get_expectation(test_name, default_expectations):
+    return default_expectations.get(test_name, "pass")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--interface", required=True)
+    parser.add_argument("--ipaddr", required=True)
+    parser.add_argument("--mask", default="24")
+    parser.add_argument("--pcap-dir", required=True)
+    parser.add_argument("--output", required=True)
+    args = parser.parse_args()
+
+    if not check_root():
+        lava_report("check_root", "skip", args.output)
+        return
+
+    if not check_tcpreplay():
+        lava_report("check_tcpreplay", "skip", args.output)
+        return
+
+    tap_fd = create_tap_interface(args.interface)
+    # os.open() may legitimately return fd 0, so compare against None
+    if tap_fd is None:
+        lava_report("create_tap_interface", "fail", args.output)
+        return
+
+    if not configure_interface(args.interface, args.ipaddr, args.mask):
+        lava_report("configure_interface", "fail", args.output)
+        os.close(tap_fd)
+        return
+
+    default_expectations = {
+        "tcp_basic": "pass",
+        "tcp_data": "pass",
+        "udp_packet": "pass",
+        "icmp_ping": "pass",
+        "fragmented": "pass",
+        "tcp_rst": "pass",
+        "tcp_full_cycle": "pass",
+        "dns_query_response": "pass",
+        "bad_tcp_flags": "xfail",
+        "tcp_multistream": "pass",
+        "false_positive_noise": "pass",
+        "false_positive_overlap": "xfail",
+        "false_positive_icmp_flood": "xfail",
+    }
+
+    pcaps = sorted(glob.glob(os.path.join(args.pcap_dir, "*.pcap")))
+    for pcap_path in pcaps:
+        pcap = os.path.basename(pcap_path)
+        test_name = os.path.splitext(pcap)[0]
+        expected = get_expectation(test_name, default_expectations)
+
+        try:
+            success = run_tcpreplay(args.interface, pcap_path)
+        except Exception as e:
+            print(f"Exception during tcpreplay for {test_name}: {e}")
+            success = False
+
+        # xfail tests are expected to fail: a failure still counts as a
+        # pass, while an unexpected success (xpass) is called out in the log
+        if expected == "xfail":
+            if success:
+                print(f"XPASS: {test_name} was expected to fail but passed")
+            lava_report(f"run_{test_name}", "pass", args.output)
+        elif success:
+            lava_report(f"run_{test_name}", "pass", args.output)
+        else:
+            lava_report(f"run_{test_name}", "fail", args.output)
+
+    cleanup_interface(args.interface)
+    os.close(tap_fd)
+
+
+if __name__ == "__main__":
+    main()
"__main__": + main() diff --git a/automated/linux/tcpreplay/tcpreplay.yaml b/automated/linux/tcpreplay/tcpreplay.yaml new file mode 100644 index 000000000..febc400b2 --- /dev/null +++ b/automated/linux/tcpreplay/tcpreplay.yaml @@ -0,0 +1,34 @@ +metadata: + format: Lava-Test Test Definition 1.0 + name: tcpreplay + description: Replay a PCAP file using tcpreplay and verify via TUN interface + maintainer: + - anders.roxell@linaro.org + os: + - debian + - ubuntu + - centos + - fedora + - openembedded + scope: + - functional + devices: + - juno + - x86 + +params: + # Interface to replay traffic on + INTERFACE: "tun0" + # Path to PCAP directory + PCAP: "./pcap/" + # IP address to assign to the TUN interface + IPADDR: "10.0.0.1" + # Netmask + MASK: "24" + +run: + steps: + - cd automated/linux/tcpreplay/ + - python3 pcap/generate_pcap.py + - python3 tcpreplay.py --interface "${INTERFACE}" --ipaddr "${IPADDR}" --mask "${MASK}" --pcap-dir "${PCAP}" --output ./output/result.txt + - ../../utils/send-to-lava.sh ./output/result.txt From cb19cb9d92f976d1b87e4cff5cc639dfee60f155 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 6 Jun 2025 00:25:27 +0200 Subject: [PATCH 58/62] automated: linux: modules: add memory check Add helper functions to log memory usage before and after loading/unloading kernel modules using 'modprobe insert' and 'modprobe remove'. This helps monitor the memory footprint of kernel modules and detect potential memory issues. Signed-off-by: Anders Roxell --- automated/linux/modules/modules.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/automated/linux/modules/modules.sh b/automated/linux/modules/modules.sh index 158733801..68bd89af8 100755 --- a/automated/linux/modules/modules.sh +++ b/automated/linux/modules/modules.sh @@ -38,6 +38,24 @@ while getopts "c:d:i:l:n:s:h" o; do esac done +get_mem_usage_kb() { + grep -i "MemAvailable:" /proc/meminfo | awk '{ print $2 }' +} + +compare_memory_usage() { + local before_kb=$1 + local after_kb=$2 + local module=$3 + local diff_kb + diff_kb=$((before_kb - after_kb)) + echo "memcheck: before ${before_kb}, after ${after_kb}, diff ${diff_kb}" + if [ "$diff_kb" -lt 0 ]; then + report_fail "memcheck_${module}" + else + report_pass "memcheck_${module}" + fi +} + get_modules_list() { if [ -z "${MODULES_LIST}" ]; then if [ -n "${MODULES_SUBDIRS}" ]; then @@ -125,6 +143,7 @@ run () { if ! lsmod | grep "^${module}"; then for num in $(seq "${MODULE_MODPROBE_NUMBER}"); do dmesg -C + mem_before=$(get_mem_usage_kb) report "" "${module}" "insert" "${num}" echo echo "modinfo ${module}" @@ -135,6 +154,8 @@ run () { scan_dmesg_for_errors check_module_unloaded "${module}" + mem_after=$(get_mem_usage_kb) + compare_memory_usage "$mem_before" "$mem_after" "$module" done fi done From 7639dc22230f2130d127a54dea43bff145c1be6a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 17 Jun 2025 16:59:38 +0100 Subject: [PATCH 59/62] automated: linux: ltp: Allow detection of the number of CPUs at runtime (#573) Currently the number of kirk workers has to be configured explicitly, kirk itself does not support detecting the number of CPUs the system has and test-definitions just passes the value it has straight through to the kirk runner. This is a bit annoying when using templating to share job definitions over multiple device types, requiring explicit per type configuration. 
Given that a very common case is going to be to use all the CPUs available on the target platform, provide support for the magic number of workers "max", which counts the number of CPUs reported by /proc/cpuinfo and uses that as the number of workers.

Signed-off-by: Mark Brown
---
 automated/linux/ltp/ltp.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index 0ece790df..9fa924b8e 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -199,6 +199,9 @@ run_ltp() {
     if [ $? -ne "0" ]; then
         error_msg "${RUNNER} is not installed into the file system."
     fi
+    if [ "${KIRK_WORKERS}" = "max" ]; then
+        KIRK_WORKERS=$(grep ^processor /proc/cpuinfo | wc -l)
+    fi
     pipe0_status "${RUNNER} --framework ltp --run-suite shardfile \
         -d ${LTP_TMPDIR} --env LTP_COLORIZE_OUTPUT=0 \
         ${SKIPFILE_PATH:+--skip-file} ${SKIPFILE_PATH} \

From 260e212342aab37bfb66bb384709f5140c026859 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 18 Jun 2025 11:56:13 +0100
Subject: [PATCH 60/62] automated: linux: ltp: Disable colour output from kirk

By default kirk produces colourised output, which results in escape sequences going into the console log. Since the primary use of test-definitions is automated, this isn't terribly useful and results in logs that are harder to read. Pass the --no-colors option to disable this output.

Signed-off-by: Mark Brown
---
 automated/linux/ltp/ltp.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh
index 9fa924b8e..6d1695de4 100755
--- a/automated/linux/ltp/ltp.sh
+++ b/automated/linux/ltp/ltp.sh
@@ -206,6 +206,7 @@ run_ltp() {
         -d ${LTP_TMPDIR} --env LTP_COLORIZE_OUTPUT=0 \
         ${SKIPFILE_PATH:+--skip-file} ${SKIPFILE_PATH} \
         ${KIRK_WORKERS:+--workers} ${KIRK_WORKERS} \
+        --no-colors \
         --json-report /tmp/kirk-report.json" \
         "tee ${OUTPUT}/LTP_${LOG_FILE}.out"
     parse_ltp_json_results "/tmp/kirk-report.json"

From eb4a938995e327ffcbb2ec3bddea6b43e9a90cc7 Mon Sep 17 00:00:00 2001
From: Anders Roxell
Date: Tue, 17 Jun 2025 13:46:18 +0200
Subject: [PATCH 61/62] automated: linux: kselftest: filter skip entries by subsuite

When running a subset of kselftest suites (e.g. kselftest-mm), the existing skipfile logic still applies all entries from skipfile-lkft.yaml, including those unrelated to the selected subsuites. As a result, irrelevant tests (such as those from the breakpoints suite) appear as "skip" in result.json, even though they were not part of the test run.

To ensure accurate skip handling, the logic has been updated so that skip entries only apply to tests within the subsuites selected for the current run. Each skip entry is parsed to extract the subsuite name, which is then compared against the entries in TST_CMDFILES. Skip rules are applied only when a match is found, preventing unrelated tests from appearing as "skipped" in the results.

This avoids polluting results with unrelated test cases and makes skips more accurate and meaningful.
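The matching rule, sketched in Python for illustration (the patch itself implements this in shell, and the "suite:test" entry format shown here is an assumption based on kselftest-list.txt naming):

    def applicable_skips(skip_regexes, tst_cmdfiles):
        # a skip entry applies only when its subsuite (the text before
        # the first ':') names one of the suites selected for this run
        selected = set(tst_cmdfiles.split())
        return [r for r in skip_regexes if r.split(":", 1)[0] in selected]

    skips = ["mm:compaction_test", "breakpoints:breakpoint_test_arm64"]
    print(applicable_skips(skips, "mm"))
    # ['mm:compaction_test'] -- the breakpoints entry no longer shows up as a skip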
Signed-off-by: Anders Roxell
---
 automated/linux/kselftest/kselftest.sh | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/automated/linux/kselftest/kselftest.sh b/automated/linux/kselftest/kselftest.sh
index 6d598a69e..6e1bba33e 100755
--- a/automated/linux/kselftest/kselftest.sh
+++ b/automated/linux/kselftest/kselftest.sh
@@ -189,9 +189,18 @@ cp kselftest-list.txt kselftest-list.txt.orig
 echo "skiplist:"
 echo "========================================"
 while read -r skip_regex; do
-    echo "$skip_regex"
-    # Remove matching tests from list of tests to run and report it as skipped
-    perl -i -ne 'if (s|^('"${skip_regex}"')$|\1 skip|) { print STDERR; } else { print; }' kselftest-list.txt 2>>"${RESULT_FILE}"
+    subsuite="${skip_regex%%:*}"
+
+    # Loop through each subsuite in TST_CMDFILES and compare
+    for selected in ${TST_CMDFILES}; do
+        if [ "${subsuite}" = "${selected}" ]; then
+            echo "$skip_regex"
+            # Remove matching tests from list of tests to run and report it as skipped
+            perl -i -ne 'if (s|^('"${skip_regex}"')$|\1 skip|) { print STDERR; } else { print; }' \
+                kselftest-list.txt 2>>"${RESULT_FILE}"
+            break
+        fi
+    done
 done < "${skips}"
 echo "========================================"
 rm -f "${skips}"

From 0205ad2b0786943d59e627b714853325033f98f4 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 18 Jun 2025 15:35:29 +0100
Subject: [PATCH 62/62] automated: linux: ltp: Speed up parsing of JSON results

When parsing the JSON output from kirk we capture any logs that were generated for tests that didn't pass. Currently we do this by first invoking jq to generate a list of tests that were run, then invoking jq again for each test to check whether the result was a pass. If the result wasn't a pass we then invoke jq again to get the log.

This is wildly inefficient: we invoke a new copy of jq and parse the entire results file once per test on the test system just to identify the tests that didn't pass (a list which will hopefully be much shorter than the list of those that did). Fortunately jq is capable of directly generating the list of non-passing tests, so we can do a single parse for the initial filter; update the script to do that. We still invoke jq again to generate the log files, which are all written into per-test files for reporting, but most of the time most tests should pass.

With the existing implementation for the syscalls suite on an Avenger96 the results parsing takes longer than running the actual tests: the full job currently takes over an hour and 20 minutes, but with this change that time is reduced to a bit under 50 minutes. Approximately 35 minutes of the job is taken running the tests, so the actual parsing is about twice as fast. There are a lot of failures in these jobs and the benefits should be greater with a higher pass rate.
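As a back-of-the-envelope model of why this helps (illustrative Python only; it assumes each jq invocation re-parses the whole report, and the example numbers are made up):

    def full_parses(total_tests, non_passing, single_filter):
        if single_filter:
            # one parse to list the non-passing tests, plus one per
            # non-passing test to extract its log
            return 1 + non_passing
        # one parse to list all tests, one per test to read its result,
        # plus one per non-passing test to extract its log
        return 1 + total_tests + non_passing

    print(full_parses(1300, 100, single_filter=False))  # 1401 parses
    print(full_parses(1300, 100, single_filter=True))   # 101 parses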
Signed-off-by: Mark Brown --- automated/linux/ltp/ltp.sh | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh index 6d1695de4..f31117389 100755 --- a/automated/linux/ltp/ltp.sh +++ b/automated/linux/ltp/ltp.sh @@ -165,14 +165,9 @@ parse_ltp_output() { } parse_ltp_json_results() { - local result jq -r '.results| .[]| "\(.test_fqn) \(.test.result)"' "$1" \ | sed 's/brok/fail/; s/conf/skip/' >> "${RESULT_FILE}" - for test_fqn in $(jq -r '.results| .[]| .test_fqn' "$1"); do - result="$(jq -r '.results | .[] | select(.test_fqn == "'"${test_fqn}"'") | .test.result' "$1")" - if [ "${result}" = pass ]; then - continue - fi + for test_fqn in $(jq -r '.results| .[]| select(.test.result != "pass") | .test_fqn' "$1"); do jq -r '.results | .[] | select(.test_fqn == "'"${test_fqn}"'") | .test.log' "$1" > ${OUTPUT}/${test_fqn}.log done }
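For reference, the same single-pass idea expressed as a minimal Python sketch (an illustration only, not part of the patch; it assumes kirk's report has the shape the jq filters above rely on: a top-level "results" array whose entries carry "test_fqn" and a "test" object with "result" and "log" fields):

    import json
    import os

    def parse_kirk_report(report_path, result_file, output_dir):
        remap = {"brok": "fail", "conf": "skip"}
        with open(report_path) as f:
            report = json.load(f)  # the report is parsed exactly once
        with open(result_file, "a") as results:
            for entry in report["results"]:
                fqn = entry["test_fqn"]
                result = entry["test"]["result"]
                results.write("%s %s\n" % (fqn, remap.get(result, result)))
                if result != "pass":
                    # only non-passing tests get a per-test log file
                    log_path = os.path.join(output_dir, "%s.log" % fqn)
                    with open(log_path, "w") as log:
                        log.write(entry["test"]["log"])

    # e.g. parse_kirk_report("/tmp/kirk-report.json", "result.txt", "output")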