diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ec7551c16..fc6bd92e1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -21,7 +21,7 @@ jobs: - name: Install current plugin run: | - python mkdocs_plugin/setup.py install --force + pip install mkdocs_plugin/ pip freeze - name: Build docs diff --git a/automated/android/bootchart/device-script.sh b/automated/android/bootchart/device-script.sh index 902616ad5..db332a122 100755 --- a/automated/android/bootchart/device-script.sh +++ b/automated/android/bootchart/device-script.sh @@ -1,9 +1,11 @@ #!/system/bin/sh # shellcheck disable=SC2181 +# shellcheck disable=SC2320 # # script to start and stop bootchart test. # # Copyright (C) 2014, Linaro Limited. +# Copyright (C) 2025, Qualcomm Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License diff --git a/automated/android/kunit/kunit.sh b/automated/android/kunit/kunit.sh new file mode 100755 index 000000000..e958018b3 --- /dev/null +++ b/automated/android/kunit/kunit.sh @@ -0,0 +1,146 @@ +#!/bin/bash -ex + +# shellcheck disable=SC1091 +. ../../lib/sh-test-lib +DIR_OUTPUT="$(pwd)/output" +mkdir -p "${DIR_OUTPUT}" +RESULT_FILE="${DIR_OUTPUT}/result.txt" +export RESULT_FILE + +# shellcheck disable=SC1091 +. ../../lib/android-test-lib + +RETRY_COUNT=5 +RETRY_INTERVAL=2 + +TESTS_ZIP_URL="" +SQUAD_UPLOAD_URL="" +TRADEFED_PREBUILTS_GIT_URL="https://android.googlesource.com/platform/tools/tradefederation/prebuilts" + +F_TESTS_ZIP="$(pwd)/tests.zip" +DIR_TESTS="$(pwd)/tests" +DIR_TEST_LOGS="${DIR_OUTPUT}/test-logs" +F_KUNIT_LOG="${DIR_TEST_LOGS}/kunit.log" +DIR_TF_PREBUILTS="$(pwd)/prebuilts" + +function usage(){ + echo "Usage: $0 -u [ -s ]" 1>&2 + exit 1 +} + +function upload_logs_to_squad(){ + if [ -z "${SQUAD_UPLOAD_URL}" ]; then + return + fi + # Upload test log and result files to artifactorial. + name_dir_output=$(basename "${DIR_OUTPUT}") + if ! tar caf "kunit-output-$(date +%Y%m%d%H%M%S).tar.xz" "${name_dir_output}"; then + error_fatal "tradefed - failed to collect results and log files [$ANDROID_SERIAL]" + fi + ATTACHMENT=$(ls kunit-output-*.tar.xz) + ../../utils/upload-to-squad.sh -a "${ATTACHMENT}" -u "${SQUAD_UPLOAD_URL}" +} + +function parse_kunit_log(){ + local f_kunit_log="${1}" + local f_kunit_stub_log="${DIR_TEST_LOGS}/kunit_stub.log" + + if [ -z "${f_kunit_log}" ] || [ ! 
-f "${f_kunit_log}" ]; then + echo "KUnit log does not exist" + return + fi + # grep the stub log to a single file and parsing the results + # 20:43:20 stub: soc-utils-test.soc-utils#test_snd_soc_params_to_bclk: PASSED (0ms) + # 00:21:09 stub: kunit-example-test.example_init#example_init_test: PASSED (0ms) + # | cut -d: -f4- \ # kunit-example-test.example_init#example_init_test: PASSED (0ms) + # | tr -d ':' \ # kunit-example-test.example_init#example_init_test PASSED (0ms) + # | awk '{print $1, $2}' \ # kunit-example-test.example_init#example_init_test PASSED + # | sort | uniq \ # to filter out the duplication of FAILURE in Result Summary part + grep "stub:" "${f_kunit_log}" \ + | cut -d: -f4- \ + | tr -d ':' \ + | awk '{print $1, $2}' \ + | sort | uniq \ + > "${f_kunit_stub_log}" + while read -r line; do + # kunit-example-test.example_init#example_init_test PASSED + # kunit-example-test.example#example_skip_test IGNORED + # soc-utils-test#soc-utils-test FAILURE + test_case_name=$(echo "${line}"|awk '{print $1}') + test_case_result=$(echo "${line}"|awk '{print $2}') + + # reformat the test case name to avoid potential confusions + # being caused by some special characters + test_case_name=$(echo "${test_case_name}" \ + | tr -c '#@/+,[:alnum:]:.-' '_' \ + | tr -s '_' \ + | sed 's/_$//' \ + ) + + case "X${test_case_result}" in + "XPASSED") + report_pass "${test_case_name}" + ;; + "XIGNORED") + report_skip "${test_case_name}" + ;; + "XFAILURE") + report_fail "${test_case_name}" + ;; + *) + report_unknown "${test_case_name}" + ;; + esac + done < "${f_kunit_stub_log}" +} + +while getopts "u:s:h" o; do + case "$o" in + u) TESTS_ZIP_URL="${OPTARG}" ;; + s) SQUAD_UPLOAD_URL="${OPTARG}" ;; + h|*) usage ;; + esac +done + +# export ANDROID_SERIAL +initialize_adb + +if [ -z "${TESTS_ZIP_URL}" ]; then + echo "The TESTS_ZIP_URL must be specified." + exit 1 +fi + +# download and unzip tests.zip +rm -f "${F_TESTS_ZIP}" && \ + curl --retry "${RETRY_COUNT}" --retry-delay "${RETRY_INTERVAL}" -fsSL "${TESTS_ZIP_URL}" -o "${F_TESTS_ZIP}" +rm -fr "${DIR_TESTS}" && \ + mkdir -p "${DIR_TESTS}" && \ + unzip -o "${F_TESTS_ZIP}" -d "${DIR_TESTS}" + +# clone the tradefed prebuilts repository +i=1 +while [ $i -le "${RETRY_COUNT}" ]; do + rm -fr "${DIR_TF_PREBUILTS}" + if git clone --depth 1 "${TRADEFED_PREBUILTS_GIT_URL}" "${DIR_TF_PREBUILTS}"; then + break + fi + + # try again in ${RETRY_INTERVAL} seconds + sleep "${RETRY_INTERVAL}" + i=$((i + 1)) +done + +# run the kunit test +mkdir -p "${DIR_TEST_LOGS}" +prebuilts/filegroups/tradefed/tradefed.sh \ + run commandAndExit \ + template/local_min \ + --template:map test=suite/test_mapping_suite \ + --include-filter kunit \ + --tests-dir="${DIR_TESTS}" \ + --log-file-path="${DIR_TEST_LOGS}" \ + -s "${ANDROID_SERIAL}" |& tee "${F_KUNIT_LOG}" + +parse_kunit_log "${F_KUNIT_LOG}" + +upload_logs_to_squad diff --git a/automated/android/kunit/kunit.yaml b/automated/android/kunit/kunit.yaml new file mode 100644 index 000000000..a38f060d7 --- /dev/null +++ b/automated/android/kunit/kunit.yaml @@ -0,0 +1,34 @@ +metadata: + name: kunit-tests + format: "Lava-Test Test Definition 1.0" + description: | + Run the KUnit test on Android based on the tradefed framework + provided by google. 
+ maintainer: + - yongqin.liu@linaro.org + os: + - android + devices: + - db845c + - rb5 + - sm8550 + scope: + - functional + +params: + # The URL of the tests.zip file generated during the kernel build, + # which includes files for the kunit modules and necessary configurations + TESTS_ZIP_URL: "" + # The SQUAD URL to be used to upload the result and log files. + # See https://squad.readthedocs.io/en/latest/intro.html#submitting-results. + # SQUAD_ARCHIVE_SUBMIT_TOKEN is used for upload authentication, + # and must be defined by the submitter as a profile-managed token + SQUAD_UPLOAD_URL: "" + +run: + steps: + - cd ./automated/android/kunit + # Run kunit.sh to download tests.zip and run the KUnit tests via tradefed. + - ./kunit.sh -u "${TESTS_ZIP_URL}" -s "${SQUAD_UPLOAD_URL}" + # Send test results to LAVA. + - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh b/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh index 640935dd0..0f9902a56 100755 --- a/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh +++ b/automated/android/media-codecs-functional-tests/linaro-android-userspace-tests.sh @@ -4,6 +4,7 @@ # shellcheck disable=SC2181 # shellcheck disable=SC2155 # shellcheck disable=SC2166 +# shellcheck disable=SC2320 # shellcheck disable=SC3006 # shellcheck disable=SC3010 # shellcheck disable=SC3018 @@ -12,6 +13,7 @@ # shellcheck disable=SC3060 ############################################################################# # Copyright (c) 2014 Linaro +# Copyright (c) 2025 Qualcomm Inc # All rights reserved. This program and the accompanying materials # are made available under the terms of the Eclipse Public License v1.0 # which accompanies this distribution, and is available at @@ -19,6 +21,7 @@ # # Contributors: # Linaro +# Milosz Wasilewski ############################################################################# # Individual and batch test definitions diff --git a/automated/android/noninteractive-tradefed/setup.sh b/automated/android/noninteractive-tradefed/setup.sh index 0c8f004a4..1fd3b546b 100755 --- a/automated/android/noninteractive-tradefed/setup.sh +++ b/automated/android/noninteractive-tradefed/setup.sh @@ -5,10 +5,15 @@ . ../../lib/sh-test-lib .
../../lib/android-test-lib -java_path="/usr/lib/jvm/java-11-openjdk-amd64/bin/java" +## To enable running x86_64 binary on aarch64 host or inside container of it +java_path_arch_str="amd64" +if [ "X$(uname -m)" = "Xaarch64" ]; then + java_path_arch_str="arm64" +fi +java_path="/usr/lib/jvm/java-11-openjdk-${java_path_arch_str}/bin/java" if [ -n "${ANDROID_VERSION}" ] && echo "${ANDROID_VERSION}" | grep -E -q "aosp-android14|aosp-main"; then # use openjdk-17 for Android14+ versions - java_path="/usr/lib/jvm/java-17-openjdk-amd64/bin/java" + java_path="/usr/lib/jvm/java-17-openjdk-${java_path_arch_str}/bin/java" fi dist_name diff --git a/automated/android/noninteractive-tradefed/tradefed.sh b/automated/android/noninteractive-tradefed/tradefed.sh index 4e5804f8a..f7689af65 100755 --- a/automated/android/noninteractive-tradefed/tradefed.sh +++ b/automated/android/noninteractive-tradefed/tradefed.sh @@ -123,7 +123,18 @@ file_name=$(basename "${TEST_URL}") if echo "${TEST_URL}" | grep "^http://lkft-cache.lkftlab/" ; then NO_PROXY=.lkftlab wget -S --progress=dot:giga "${TEST_URL}" -O "${file_name}" elif echo "${TEST_URL}" | grep "^http" ; then - wget -S --progress=dot:giga "${TEST_URL}" -O "${file_name}" + # using kisscache to download the file, based on the following change: + # https://gitlab.com/lava/lava/-/merge_requests/2734 + # shellcheck disable=SC2153 + if [ -n "${HTTP_CACHE}" ]; then + # and it's in the format like this: + # https://cache.lavasoftware.org/api/v1/fetch/?url=%s + # so need to remove "%s" first here + http_cache=$(echo "${HTTP_CACHE}"|sed 's|%s||') + wget -S --progress=dot:giga "${http_cache}${TEST_URL}" -O "${file_name}" + else + wget -S --progress=dot:giga "${TEST_URL}" -O "${file_name}" + fi else cp "${TEST_URL}" "./${file_name}" fi diff --git a/automated/lib/parse_rt_tests_results.py b/automated/lib/parse_rt_tests_results.py index 9cc0f1e4f..5c966c590 100755 --- a/automated/lib/parse_rt_tests_results.py +++ b/automated/lib/parse_rt_tests_results.py @@ -69,9 +69,9 @@ def parse_json(testname, filename): # rlta timertat also knows about irqs parse_irqs(rawdata) - elif "inversions" in rawdata: + elif "inversion" in rawdata: # pi_stress - print("inversion {}\n".format(rawdata("inversions"))) + print("inversion pass {} count\n".format(rawdata["inversion"])) if int(rawdata["return_code"]) == 0: print("{} pass".format(testname)) diff --git a/automated/lib/py_util_lib.py b/automated/lib/py_util_lib.py index a3da5ebfb..e695017b0 100644 --- a/automated/lib/py_util_lib.py +++ b/automated/lib/py_util_lib.py @@ -1,5 +1,4 @@ -"""Shared Python 3 utility code. -""" +"""Shared Python 3 utility code.""" from pathlib import Path import subprocess diff --git a/automated/lib/sh-test-lib b/automated/lib/sh-test-lib index bce0e716e..c38bf4d25 100755 --- a/automated/lib/sh-test-lib +++ b/automated/lib/sh-test-lib @@ -102,15 +102,16 @@ exit_on_skip() { check_return() { # shellcheck disable=SC2039 local exit_code="$?" 
- [ "$#" -ne 1 ] && error_msg "Usage: check_return test_case" + [ "$#" -lt 1 ] && error_msg "Usage: check_return test_case [xfail]" # shellcheck disable=SC2039 local test_case="$1" + local xfail="${2:-}" - if [ "${exit_code}" -ne 0 ]; then - echo "${test_case} fail" | tee -a "${RESULT_FILE}" + if [ "${exit_code}" -ne 0 ] && [ -z "${xfail}" ]; then + report_fail "${test_case}" return "${exit_code}" else - echo "${test_case} pass" | tee -a "${RESULT_FILE}" + report_pass "${test_case}" return 0 fi } @@ -237,7 +238,7 @@ dist_name() { fi # convert dist to lower case - dist=$(echo ${dist} | tr '[:upper:]' '[:lower:]') + dist=$(echo "${dist}" | tr '[:upper:]' '[:lower:]') case "${dist}" in rpb*) dist="oe-rpb" ;; esac @@ -336,7 +337,6 @@ validate_check_sum() { if [ "$#" -ne 2 ]; then warn_msg "The number of parameters should be 2" error_msg "Usage: validate_check_sum filename known_sha256sum" - return 1 fi # shellcheck disable=SC2039 local OUTPUT_FILE_NAME="$1" diff --git a/automated/linux/alsa-bat/bat.sh b/automated/linux/alsa-bat/bat.sh new file mode 100755 index 000000000..39b542643 --- /dev/null +++ b/automated/linux/alsa-bat/bat.sh @@ -0,0 +1,72 @@ +#!/bin/sh -e +# shellcheck disable=SC1091 + +OUTPUT="$(pwd)/output" +RESULT_FILE="${OUTPUT}/result.txt" + +. ../../lib/sh-test-lib + +create_out_dir "${OUTPUT}" + +PARAMS= + +if [ "${TST_CARD}" != "" ]; then + PARAMS="${PARAMS} -D${TST_CARD}" +fi + +if [ "${TST_CHANNELS}" != "" ]; then + PARAMS="${PARAMS} -c${TST_CHANNELS}" +fi + +if [ "${TST_PLAYBACK}" != "" ]; then + PARAMS="${PARAMS} -P${TST_PLAYBACK}" +fi + +if [ "${TST_CAPTURE}" != "" ]; then + PARAMS="${PARAMS} -C${TST_CAPTURE}" +fi + +if [ "${TST_FORMAT}" != "" ]; then + PARAMS="${PARAMS} -f${TST_FORMAT}" +fi + +if [ "${TST_RATE}" != "" ]; then + PARAMS="${PARAMS} -r${TST_RATE}" +fi + +if [ "${TST_LENGTH}" != "" ]; then + PARAMS="${PARAMS} -n${TST_LENGTH}" +fi + +if [ "${TST_SIGMA_K}" != "" ]; then + PARAMS="${PARAMS} -k${TST_SIGMA_K}" +fi + +if [ "${TST_FREQ}" != "" ]; then + PARAMS="${PARAMS} -F${TST_FREQ}" +fi + +# Debian installs as alsabat due to name collisions +if [ "$(command -v alsabat)" != "" ]; then + BAT=alsabat +elif [ "$(command -v bat)" != "" ]; then + BAT=bat +fi + +if [ "${BAT}" = "" ]; then + echo Unable to find BAT + exit 1 +fi + +TEST_NAME="$(echo "bat${PARAMS}" | sed 's/ /_/g' | sed 's/-//g')" + +# Return code 0 for pass, other codes for various fails +if ${BAT} ${PARAMS} --log=${OUTPUT}/${TEST_NAME}.log ; then + R=pass +else + R=fail +fi + +echo ${TEST_NAME} ${R} >> ${RESULT_FILE} + +../../utils/send-to-lava.sh ${RESULT_FILE} diff --git a/automated/linux/alsa-bat/bat.yaml b/automated/linux/alsa-bat/bat.yaml new file mode 100644 index 000000000..33b9d54a4 --- /dev/null +++ b/automated/linux/alsa-bat/bat.yaml @@ -0,0 +1,47 @@ +metadata: + name: alsabat + format: "Lava-Test Test Definition 1.0" + description: "Run the ALSA Basic Audio Test" + maintainer: + - broonie@kernel.org + os: + - debian + - ubuntu + - fedora + - centos + - oe + scope: + - functional + devices: + - all + +params: + # Number of audio channel to use + TST_CHANNELS: "" + + # Playback device + TST_PLAYBACK: "" + + # Capture device + TST_CAPTURE: "" + + # Sample format + TST_FORMAT: "" + + # Sample rate + TST_RATE: "" + + # Duration of generated signal + TST_LENGTH: "" + + # Sigma k for analysis + TST_SIGMA_K: "" + + # Target frequency + TST_FREQ: "" + +run: + steps: + - cd ./automated/linux/alsa-bat + - ./bat.sh + - ../../utils/send-to-lava.sh ./output/result.txt diff --git 
a/automated/linux/alsa-smoke/alsa-smoke.sh b/automated/linux/alsa-smoke/alsa-smoke.sh new file mode 100755 index 000000000..e6dfdaca0 --- /dev/null +++ b/automated/linux/alsa-smoke/alsa-smoke.sh @@ -0,0 +1,65 @@ +#!/bin/sh + +# shellcheck disable=SC1091 +. ../../lib/sh-test-lib +OUTPUT="$(pwd)/output" +RESULT_FILE="${OUTPUT}/result.txt" +export RESULT_FILE +REFERENCE_PATH="/dev/snd" +SKIP_INSTALL="False" + +usage() { + echo "Usage: $0 [-s ] [-p ]" 1>&2 + exit 1 +} + +while getopts "s:p:h" o; do + case "$o" in + s) SKIP_INSTALL="${OPTARG}" ;; + p) REFERENCE_PATH="${OPTARG}" ;; + h|*) usage ;; + esac +done + +install() { + dist_name + # shellcheck disable=SC2154 + case "${dist}" in + debian|ubuntu) install_deps "alsa-utils" "${SKIP_INSTALL}";; + fedora|centos) install_deps "alsa-utils" "${SKIP_INSTALL}";; + unknown) warn_msg "Unsupported distro: package install skipped" ;; + esac +} + +run() { + # shellcheck disable=SC3043 + local test_command="$1" + # shellcheck disable=SC3043 + local test_name="$2" + # shellcheck disable=SC2086 + if command -v ${test_command}; then + # shellcheck disable=SC2086 + if ${test_command} -l | grep "card [0-9]"; then + report_pass "${test_name}_devices" + else + report_fail "${test_name}_devices" + fi + else + # shellcheck disable=SC2086 + # shellcheck disable=SC2012 + DEVICES=$(find ${REFERENCE_PATH} -type c -name "controlC*" | wc -l) + if [ "${DEVICES}" -gt 0 ]; then + report_pass "${test_name}_devices" + else + report_fail "${test_name}_devices" + fi + fi +} + +# Test run. +create_out_dir "${OUTPUT}" + +install + +run aplay playback +run arecord record diff --git a/automated/linux/alsa-smoke/alsa-smoke.yaml b/automated/linux/alsa-smoke/alsa-smoke.yaml new file mode 100644 index 000000000..0fcb4775b --- /dev/null +++ b/automated/linux/alsa-smoke/alsa-smoke.yaml @@ -0,0 +1,28 @@ +metadata: + format: Lava-Test Test Definition 1.0 + name: alsa-smoke + description: "Run alsa smoke tests. The test checks if + there are playback and record devices available. + No actual playback or record tests are performed." + maintainer: + - milosz.wasilewski@oss.qualcomm.com + os: + - debian + - ubuntu + - centos + - fedora + - openembedded + scope: + - functional + devices: + - imx8mm-lpddr4-evk + +params: + SKIP_INSTALL: "False" + REFERENCE_PATH: "/dev/snd" + +run: + steps: + - cd ./automated/linux/alsa-smoke/ + - ./alsa-smoke.sh -s "${SKIP_INSTALL}" -p "${REFERENCE_PATH}" + - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/bootrr/bootrr.sh b/automated/linux/bootrr/bootrr.sh new file mode 100755 index 000000000..bd03093d0 --- /dev/null +++ b/automated/linux/bootrr/bootrr.sh @@ -0,0 +1,48 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2025 Qualcomm Inc. + +# shellcheck disable=SC1091 +. ../../lib/sh-test-lib +BOARD="" +REPOSITORY="https://github.com/linux-msm/bootrr" +SKIP_INSTALL="true" + +usage() { + echo "Usage: $0 [-b ] [-r ] [-s ]" 1>&2 + exit 1 +} + +while getopts "b:r:s:" o; do + case "$o" in + b) BOARD="${OPTARG}" ;; + r) REPOSITORY="${OPTARG}" ;; + s) SKIP_INSTALL="${OPTARG}" ;; + *) usage ;; + esac +done + +install() { + install_deps git "${SKIP_INSTALL}" + git clone "${REPOSITORY}" bootrr + cd bootrr || error_msg "bootrr cloning failed" + make DESTDIR=/ install +} + +! 
check_root && error_msg "This script must be run as root" + +if [ "${SKIP_INSTALL}" = "false" ] || [ "${SKIP_INSTALL}" = "False" ] || [ "${SKIP_INSTALL}" = "FALSE" ]; then + install +fi + +if [ -z "${BOARD}" ]; then + # bootrr tests are executed based on DTB + bootrr +else + # run tests for a board that might not be detected as compatible + BOOTRR_DIR="/usr/libexec/bootrr" + PATH="${BOOTRR_DIR}/helpers:${PATH}" + if [ -x "${BOOTRR_DIR}/boards/${BOARD}" ]; then + ${BOOTRR_DIR}/boards/"${BOARD}" + fi +fi diff --git a/automated/linux/bootrr/bootrr.yaml b/automated/linux/bootrr/bootrr.yaml index 084a0e3e4..c019955eb 100644 --- a/automated/linux/bootrr/bootrr.yaml +++ b/automated/linux/bootrr/bootrr.yaml @@ -1,9 +1,9 @@ metadata: format: Lava-Test Test Definition 1.0 name: bootrr - description: "Run bootrr https://github.com/andersson/bootrr" + description: "Run bootrr https://github.com/linux-msm/bootrr" maintainer: - - milosz.wasilewski@linaro.org + - milosz.wasilewski@oss.qualcomm.com - chase.qi@linaro.org os: - debian @@ -17,17 +17,16 @@ metadata: - apq8016-sbc - dragonboard-410c - dragonboard-820c + - qcs6490-rb3gen2 params: - # Refer to https://github.com/andersson/bootrr/tree/master/boards for boards supported. - BOARD: "arrow,apq8096-db820c" - SKIP_INSTALL: false + # Refer to https://github.com/linux-msm/bootrr/tree/master/boards for boards supported. + BOARD: "" + REPOSITORY: "https://github.com/linux-msm/bootrr" + SKIP_INSTALL: true run: steps: - - . ./automated/lib/sh-test-lib - - install_deps git "${SKIP_INSTALL}" - - git clone https://github.com/andersson/bootrr - - cd bootrr - - export PATH=$PWD/helpers:$PATH - - ./boards/${BOARD} + - cd automated/linux/bootrr + # bootrr produces LAVA-friendly output + - ./bootrr.sh -r "${REPOSITORY}" -s "${SKIP_INSTALL}" -b "${BOARD}" diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.sh b/automated/linux/cyclicdeadline/cyclicdeadline.sh index 20e584b46..e505ea025 100755 --- a/automated/linux/cyclicdeadline/cyclicdeadline.sh +++ b/automated/linux/cyclicdeadline/cyclicdeadline.sh @@ -7,27 +7,32 @@ . ../../lib/sh-test-lib OUTPUT="$(pwd)/output" -LOGFILE="${OUTPUT}/cyclicdeadline.json" +LOGFILE="${OUTPUT}/cyclicdeadline" RESULT_FILE="${OUTPUT}/result.txt" +TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt" INTERVAL="1000" STEP="500" THREADS="1" DURATION="1m" BACKGROUND_CMD="" +ITERATIONS=1 +USER_BASELINE="" usage() { - echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration ] [-w background_cmd]" 1>&2 + echo "Usage: $0 [-i interval] [-s step] [-t threads] [-D duration] [-w background_cmd] [-I iterations] [-x user_baseline]" 1>&2 exit 1 } -while getopts ":i:s:t:D:w:" opt; do +while getopts ":i:s:t:D:w:I:x:" opt; do case "${opt}" in i) INTERVAL="${OPTARG}" ;; s) STEP="${OPTARG}" ;; t) THREADS="${OPTARG}" ;; D) DURATION="${OPTARG}" ;; - w) BACKGROUND_CMD="${OPTARG}" ;; + w) BACKGROUND_CMD="${OPTARG}" ;; + I) ITERATIONS="${OPTARG}" ;; + x) USER_BASELINE="${OPTARG}" ;; *) usage ;; esac done @@ -48,11 +53,61 @@ fi background_process_start bgcmd --cmd "${BACKGROUND_CMD}" -"${binary}" -q -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" \ - -D "${DURATION}" --json="${LOGFILE}" +for i in $(seq ${ITERATIONS}); do + "${binary}" -q -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" \ + -D "${DURATION}" --json="${LOGFILE}-${i}.json" +done background_process_stop bgcmd # Parse test log.
-../../lib/parse_rt_tests_results.py cyclicdeadline "${LOGFILE}" \ - | tee -a "${RESULT_FILE}" +for i in $(seq ${ITERATIONS}); do + ../../lib/parse_rt_tests_results.py cyclicdeadline "${LOGFILE}-${i}.json" \ + | tee "${TMP_RESULT_FILE}" + + if [ ${ITERATIONS} -ne 1 ]; then + sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}" + fi + cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}" +done + +if [ "${ITERATIONS}" -gt 2 ]; then + max_latencies_file="${OUTPUT}/max_latencies.txt" + + # Extract all max-latency values into a file + grep "max-latency" "${RESULT_FILE}" | grep "^iteration-" | awk '{ print $(NF-1) }' |tee "${max_latencies_file}" + + if [ ! -s "${max_latencies_file}" ]; then + echo "No max-latency values found!" + report_fail "rt-tests-cyclicdeadline" + exit 1 + fi + + if [ -n "${USER_BASELINE}" ]; then + echo "Using user-provided baseline: ${USER_BASELINE}" + min_latency="${USER_BASELINE}" + else + # Find the minimum latency + min_latency=$(sort -n "${max_latencies_file}" | head -n1) + fi + + fail_count=0 + while read -r val; do + is_greater=$(echo "$val > $min_latency" | bc -l) + if [ "$is_greater" -eq 1 ]; then + fail_count=$((fail_count + 1)) + fi + done < "${max_latencies_file}" + + fail_limit=$((ITERATIONS / 2)) + + echo "Max allowed failures: $fail_limit" + echo "Actual failures: $fail_count" + echo "Number of max latencies above baseline ($min_latency) : $fail_count" + + if [ "$fail_count" -ge "$fail_limit" ]; then + report_fail "rt-tests-cyclicdeadline" + else + report_pass "rt-tests-cyclicdeadline" + fi +fi diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.yaml b/automated/linux/cyclicdeadline/cyclicdeadline.yaml index 6767399b9..6b9de2de2 100644 --- a/automated/linux/cyclicdeadline/cyclicdeadline.yaml +++ b/automated/linux/cyclicdeadline/cyclicdeadline.yaml @@ -45,10 +45,12 @@ params: # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" + ITERATIONS: 1 + USER_BASELINE: "" run: steps: - cd ./automated/linux/cyclicdeadline/ - - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -w "${BACKGROUND_CMD}" + - ./cyclicdeadline.sh -i "${INTERVAL}" -s "${STEP}" -t "${THREADS}" -D "${DURATION}" -I "${ITERATIONS}" -w "${BACKGROUND_CMD}" -x "${USER_BASELINE}" - ../../utils/upload-to-artifactorial.sh -a "output/cyclicdeadline.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/docker-networking/docker-networking.sh b/automated/linux/docker-networking/docker-networking.sh index 51c6d6ce9..b5c45abd0 100755 --- a/automated/linux/docker-networking/docker-networking.sh +++ b/automated/linux/docker-networking/docker-networking.sh @@ -7,16 +7,23 @@ RESULT_FILE="${OUTPUT}/result.txt" export RESULT_FILE IMAGE="alpine:latest" SKIP_INSTALL="True" +NETWORK_TYPE="bridge" +HOST_INTERFACE="eth0" usage() { - echo "$0 [-i ] [-s true|false]" 1>&2 + echo "$0 [-i ] [-n ] [-s true|false] [-b eth0]" 1>&2 + echo " -n option can be a combination of bridge, host and none." 1>&2 + echo " Options should be space separated." 1>&2 + echo " In case there are more than one, all tests will be executed." 
1>&2 exit 1 } -while getopts "i:s:h" o; do +while getopts "i:s:n:b:h" o; do case "$o" in i) IMAGE="${OPTARG}" ;; s) SKIP_INSTALL="${OPTARG}";; + n) NETWORK_TYPE="${OPTARG}";; + b) HOST_INTERFACE="${OPTARG}";; h|*) usage ;; esac done @@ -43,48 +50,77 @@ install_docker() { esac } +remove_one_from_skiplist() { + echo "$1" | cut -f2- -d" " +} + if [ "${SKIP_INSTALL}" = "True" ] || [ "${SKIP_INSTALL}" = "true" ]; then info_msg "Installation skipped" else install_docker fi + +HOST_IP=$(ip addr show dev "${HOST_INTERFACE}" | grep "inet " | awk '{ print $2 }' | awk -F "/" '{print $1}') + # verify that docker is available -skip_list="docker-network-list docker-start-container docker-network-inspect docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" +for NETWORK in ${NETWORK_TYPE}; do + skip_list="${skip_list} docker-network-list-${NETWORK} docker-start-container-${NETWORK} docker-network-inspect-${NETWORK} docker-network-${NETWORK} ping-container-test-${NETWORK} docker-kill-container-${NETWORK} docker-ping-host-network-${NETWORK}" +done docker --version exit_on_fail "docker-version" "${skip_list}" -# check if bridge network is present -skip_list="docker-start-container docker-network-inspect docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" -docker network ls -f name=bridge | grep bridge -exit_on_fail "docker-network-list" "${skip_list}" - -# start simple alpine container -skip_list="docker-network-inspect docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" -docker run --name ping_test_container --rm -d "${IMAGE}" /bin/sleep 90 -exit_on_fail "docker-start-container" "${skip_list}" - -# container should join bridge network -skip_list="docker-network-bridge ping-container-test docker-kill-container docker-ping-localhost-host-network" -DOCKER_INSPECT=$(docker network inspect bridge) -exit_on_fail "docker-network-inspect" "${skip_list}" - -echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][]' -IP_ADDR=$(echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][] | select(.Name=="ping_test_container") | .IPv4Address | split("/")[0]') -echo "${IP_ADDR}" -if [ -n "$IP_ADDR" ]; then - report_pass "docker-network-bridge" - eval "ping -c4 $IP_ADDR" - check_return "ping-container-test" -else - report_fail "docker-network-bridge" - report_skip "ping-container-test" -fi +for NETWORK in ${NETWORK_TYPE}; do + # check if bridge network is present + skip_list=$(remove_one_from_skiplist "${skip_list}") + docker network ls -f name="${NETWORK}" | grep "${NETWORK}" + exit_on_fail "docker-network-list" "${skip_list}" + + # start simple alpine container + skip_list=$(remove_one_from_skiplist "${skip_list}") + docker run --name ping_test_container --network "${NETWORK}" --rm -d "${IMAGE}" /bin/sleep 90 + exit_on_fail "docker-start-container" "${skip_list}" -docker kill ping_test_container -check_return "docker-kill-container" + # container should join NETWORK network + skip_list=$(remove_one_from_skiplist "${skip_list}") + DOCKER_INSPECT=$(docker network inspect "${NETWORK}") + exit_on_fail "docker-network-inspect-${NETWORK}" "${skip_list}" -# IPv4 try pinging localhost from container with host networking -docker run --name ping_localhost_host_network --rm -d "${IMAGE}" ping -4 -c 4 localhost -check_return "docker-ping-localhost-host-network" + skip_list=$(remove_one_from_skiplist "${skip_list}") + if [ "${NETWORK}" = "bridge" ]; then + echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][]' + 
IP_ADDR=$(echo "$DOCKER_INSPECT" | jq '.[0]["Containers"][] | select(.Name=="ping_test_container") | .IPv4Address | split("/")[0]') + echo "${IP_ADDR}" + if [ -n "$IP_ADDR" ]; then + report_pass "docker-network-${NETWORK}" + eval "ping -c4 $IP_ADDR" + skip_list=$(remove_one_from_skiplist "${skip_list}") + check_return "ping-container-test-${NETWORK}" + else + report_fail "docker-network-${NETWORK}" + skip_list=$(remove_one_from_skiplist "${skip_list}") + report_skip "ping-container-test-${NETWORK}" + fi + else + report_pass "docker-network-${NETWORK}" + skip_list=$(remove_one_from_skiplist "${skip_list}") + report_skip "ping-container-test-${NETWORK}" + fi + skip_list=$(remove_one_from_skiplist "${skip_list}") + docker kill ping_test_container + check_return "docker-kill-container-${NETWORK}" + + skip_list=$(remove_one_from_skiplist "${skip_list}") + if [ -n "${HOST_IP}" ]; then + xfail="" + if [ "${NETWORK}" = none ]; then + # ping should fail with disabled networking + xfail="xfail" + fi + docker run --name ping_localhost_host_network --network "${NETWORK}" --rm "${IMAGE}" ping -4 -c 4 "${HOST_IP}" + check_return "docker-ping-host-network-${NETWORK}" "${xfail}" + else + report_skip "docker-ping-host-network-${NETWORK}" + fi +done exit 0 diff --git a/automated/linux/docker-networking/docker-networking.yaml b/automated/linux/docker-networking/docker-networking.yaml index 9a8fda39e..960def16c 100644 --- a/automated/linux/docker-networking/docker-networking.yaml +++ b/automated/linux/docker-networking/docker-networking.yaml @@ -26,9 +26,13 @@ params: # Docker image. IMAGE: "alpine:latest" SKIP_INSTALL: "true" + # NETWORK can be any of "bridge", "host", "none" + # or it can be a combination of them, like "bridge host" + NETWORK: "bridge" + HOST_INTERFACE: "eth0" run: steps: - cd ./automated/linux/docker-networking/ - - ./docker-networking.sh -i "${IMAGE}" -s "${SKIP_INSTALL}" + - ./docker-networking.sh -i "${IMAGE}" -s "${SKIP_INSTALL}" -n "${NETWORK}" -b "${HOST_INTERFACE}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/kselftest/kselftest.sh b/automated/linux/kselftest/kselftest.sh index 6d598a69e..6e1bba33e 100755 --- a/automated/linux/kselftest/kselftest.sh +++ b/automated/linux/kselftest/kselftest.sh @@ -189,9 +189,18 @@ cp kselftest-list.txt kselftest-list.txt.orig echo "skiplist:" echo "========================================" while read -r skip_regex; do - echo "$skip_regex" - # Remove matching tests from list of tests to run and report it as skipped - perl -i -ne 'if (s|^('"${skip_regex}"')$|\1 skip|) { print STDERR; } else { print; }' kselftest-list.txt 2>>"${RESULT_FILE}" + subsuite="${skip_regex%%:*}" + + # Loop through each subsuite in TST_CMDFILES and compare + for selected in ${TST_CMDFILES}; do + if [ "${subsuite}" = "${selected}" ]; then + echo "$skip_regex" + # Remove matching tests from list of tests to run and report it as skipped + perl -i -ne 'if (s|^('"${skip_regex}"')$|\1 skip|) { print STDERR; } else { print; }' \ + kselftest-list.txt 2>>"${RESULT_FILE}" + break + fi + done done < "${skips}" echo "========================================" rm -f "${skips}" diff --git a/automated/linux/kselftest/parse-output.py b/automated/linux/kselftest/parse-output.py index 976a1d5cd..21f20738c 100755 --- a/automated/linux/kselftest/parse-output.py +++ b/automated/linux/kselftest/parse-output.py @@ -8,7 +8,7 @@ def slugify(line): non_ascii_pattern = r"[^A-Za-z0-9_-]+" line = re.sub(r"\[\d{1,5}\]", "", line) return re.sub( - r"_-", "_", 
re.sub(r"(^_|_$)", "", re.sub(non_ascii_pattern, "_", line)) + r"_-", "__", re.sub(r"(^_|_$)", "", re.sub(non_ascii_pattern, "_", line)) ) @@ -40,18 +40,24 @@ def make_result(ok, skip): output = "" ps = parser.Parser() + test_name = None for l in ps.parse_text(string): if l.category == "test": + test_name = make_name(l.description, l.directive.text, l.ok, l.skip) results.append( { "name": make_name(l.description, l.directive.text, l.ok, l.skip), "result": make_result(l.ok, l.skip), "children": parse_nested_tap(output), + "logs": f"{l.directive.text}", } ) output = "" elif l.category == "diagnostic": output += f"{uncomment(l.text)}\n" + for r in results: + if r["name"] == test_name and not None: + r["logs"] += f"{uncomment(l.text)}\n" return results @@ -61,7 +67,8 @@ def flatten_results(prefix, results): for r in results: test = f"{prefix}{r['name']}" children = flatten_results(f"{test}_", r["children"]) - ret += children + [{"name": test, "result": r["result"]}] + output = r["logs"] + ret += children + [{"name": test, "result": r["result"], "logs": output}] return ret @@ -74,9 +81,22 @@ def make_names_unique(results): r["name"] += f"_dup{namecounts[name]}" +def make_log_files(results): + for r in results: + name = r["name"] + if r["result"] == "fail": + try: + log_file = open(f"output/{name}.log", "w") + log_file.writelines(r["logs"]) + log_file.close() + except OSError as e: + print(f"Error writing to file output/{name}.log: {e}") + + if __name__ == "__main__": results = parse_nested_tap(sys.stdin.read()) results = flatten_results("", results) make_names_unique(results) + make_log_files(results) for r in results: print(f"{r['name']} {r['result']}") diff --git a/automated/linux/ltp/ltp.sh b/automated/linux/ltp/ltp.sh index d1b03dc05..f31117389 100755 --- a/automated/linux/ltp/ltp.sh +++ b/automated/linux/ltp/ltp.sh @@ -31,6 +31,7 @@ SHARD_NUMBER=1 SHARD_INDEX=1 RUNNER="" +KIRK_WORKERS=1 LTP_TMPDIR=/ltp-tmp @@ -58,7 +59,7 @@ usage() { exit 0 } -while getopts "M:T:S:b:d:g:e:i:s:v:R:r:u:p:t:c:n:" arg; do +while getopts "M:T:S:b:d:g:e:i:s:v:R:r:u:p:t:c:n:w:" arg; do case "$arg" in T) TST_CMDFILES="${OPTARG}" @@ -135,6 +136,9 @@ while getopts "M:T:S:b:d:g:e:i:s:v:R:r:u:p:t:c:n:" arg; do n) SHARD_NUMBER="$OPTARG" ;; + w) + KIRK_WORKERS="$OPTARG" + ;; *) usage error_msg "No flag ${OPTARG}" @@ -163,6 +167,9 @@ parse_ltp_output() { parse_ltp_json_results() { jq -r '.results| .[]| "\(.test_fqn) \(.test.result)"' "$1" \ | sed 's/brok/fail/; s/conf/skip/' >> "${RESULT_FILE}" + for test_fqn in $(jq -r '.results| .[]| select(.test.result != "pass") | .test_fqn' "$1"); do + jq -r '.results | .[] | select(.test_fqn == "'"${test_fqn}"'") | .test.log' "$1" > ${OUTPUT}/${test_fqn}.log + done } # Run LTP test suite @@ -187,12 +194,18 @@ run_ltp() { if [ $? -ne "0" ]; then error_msg "${RUNNER} is not installed into the file system." 
fi + if [ "${KIRK_WORKERS}" = "max" ]; then + KIRK_WORKERS=$(grep ^processor /proc/cpuinfo | wc -l) + fi pipe0_status "${RUNNER} --framework ltp --run-suite shardfile \ -d ${LTP_TMPDIR} --env LTP_COLORIZE_OUTPUT=0 \ - --skip-file ${SKIPFILE_PATH} \ - --json-report /tmp/kirk-report.json \ - --verbose" "tee ${OUTPUT}/LTP_${LOG_FILE}.out" + ${SKIPFILE_PATH:+--skip-file} ${SKIPFILE_PATH} \ + ${KIRK_WORKERS:+--workers} ${KIRK_WORKERS} \ + --no-colors \ + --json-report /tmp/kirk-report.json" \ + "tee ${OUTPUT}/LTP_${LOG_FILE}.out" parse_ltp_json_results "/tmp/kirk-report.json" + rm "/tmp/kirk-report.json" else pipe0_status "./runltp -p -q -f shardfile \ -l ${OUTPUT}/LTP_${LOG_FILE}.log \ diff --git a/automated/linux/ltp/ltp.yaml b/automated/linux/ltp/ltp.yaml index e6c1b2a01..2d49e9b87 100644 --- a/automated/linux/ltp/ltp.yaml +++ b/automated/linux/ltp/ltp.yaml @@ -41,9 +41,13 @@ params: # New kirk runner (https://github.com/linux-test-project/kirk.git) # Needs to be installed onto the rootfs. - # Set RUNNER to full path to kik or to kirk if its in the PATH. + # Set RUNNER to full path to kirk or to kirk if its in the PATH. RUNNER: "" + # Number of parallel test workers to use with the new KIRK runner. + # # Tests can now run concurrently by specifying '--workers X'. + KIRK_WORKERS: 1 + # If the following parameter is set, then the LTP suite is # cloned and used unconditionally. In particular, the version # of the suite is set to the commit pointed to by the @@ -76,5 +80,5 @@ params: run: steps: - cd ./automated/linux/ltp/ - - ./ltp.sh -T "${TST_CMDFILES}" -s "${SKIP_INSTALL}" -v "${LTP_VERSION}" -M "${TIMEOUT_MULTIPLIER}" -R "${ROOT_PASSWD}" -r "${RUNNER}" -b "${BOARD}" -d "${LTP_TMPDIR}" -g "${BRANCH}" -e "${ENVIRONMENT}" -i "${LTP_INSTALL_PATH}" -S "${SKIPFILE}" -p "${TEST_DIR}" -u "${TEST_GIT_URL}" -t "${BUILD_FROM_TAR}" -n "${SHARD_NUMBER}" -c "${SHARD_INDEX}" + - ./ltp.sh -T "${TST_CMDFILES}" -s "${SKIP_INSTALL}" -v "${LTP_VERSION}" -M "${TIMEOUT_MULTIPLIER}" -R "${ROOT_PASSWD}" -r "${RUNNER}" -b "${BOARD}" -d "${LTP_TMPDIR}" -g "${BRANCH}" -e "${ENVIRONMENT}" -i "${LTP_INSTALL_PATH}" -S "${SKIPFILE}" -p "${TEST_DIR}" -u "${TEST_GIT_URL}" -t "${BUILD_FROM_TAR}" -n "${SHARD_NUMBER}" -c "${SHARD_INDEX}" -w "${KIRK_WORKERS}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/modules/modules.sh b/automated/linux/modules/modules.sh index b6e5e8189..68bd89af8 100755 --- a/automated/linux/modules/modules.sh +++ b/automated/linux/modules/modules.sh @@ -38,12 +38,41 @@ while getopts "c:d:i:l:n:s:h" o; do esac done +get_mem_usage_kb() { + grep -i "MemAvailable:" /proc/meminfo | awk '{ print $2 }' +} + +compare_memory_usage() { + local before_kb=$1 + local after_kb=$2 + local module=$3 + local diff_kb + diff_kb=$((before_kb - after_kb)) + echo "memcheck: before ${before_kb}, after ${after_kb}, diff ${diff_kb}" + if [ "$diff_kb" -lt 0 ]; then + report_fail "memcheck_${module}" + else + report_pass "memcheck_${module}" + fi +} + get_modules_list() { if [ -z "${MODULES_LIST}" ]; then - subdir=$(echo "${MODULES_SUBDIRS}" | tr ' ' '|') - skiplist=$(echo "${SKIPLIST}" | tr ' ' '|') - grep -E "kernel/(${subdir})" /lib/modules/"$(uname -r)"/modules.order | tee /tmp/find_modules.txt - grep -E -v "(${skiplist})" /tmp/find_modules.txt | tee /tmp/modules_to_run.txt + if [ -n "${MODULES_SUBDIRS}" ]; then + subdir=$(echo "${MODULES_SUBDIRS}" | tr ' ' '|') + grep -E "kernel/(${subdir})" /lib/modules/"$(uname -r)"/modules.order > /tmp/find_modules.txt + else + # No subdir given, default 
to all modules + cat /lib/modules/"$(uname -r)"/modules.order > /tmp/find_modules.txt + fi + + if [ -n "${SKIPLIST}" ]; then + skiplist=$(echo "${SKIPLIST}" | tr ' ' '|') + grep -E -v "(${skiplist})" /tmp/find_modules.txt > /tmp/modules_to_run.txt + else + cp /tmp/find_modules.txt /tmp/modules_to_run.txt + fi + split --verbose --numeric-suffixes=1 -n l/"${SHARD_INDEX}"/"${SHARD_NUMBER}" /tmp/modules_to_run.txt > /tmp/shardfile echo "============== Tests to run ===============" cat /tmp/shardfile @@ -76,18 +105,57 @@ report() { fi } +scan_dmesg_for_errors() { + echo "=== Scanning dmesg for errors ===" + dmesg -l 0,1,2,3,4,5 | grep -Ei "BUG:|WARNING:|Oops:|Call Trace:" && report_fail "dmesg_error_scan" || report_pass "dmesg_error_scan" +} + +check_module_unloaded() { + local _module="$1" + if lsmod | grep "^${_module} " > /dev/null; then + echo "Module ${_module} still loaded after removal!" + report_fail "module_stuck_${_module}" + else + report_pass "module_unloaded_${_module}" + fi +} + +kmemleak_scan() { + if [ -e /sys/kernel/debug/kmemleak ]; then + echo "Triggering kmemleak scan..." + echo scan > /sys/kernel/debug/kmemleak + sleep 5 + if grep -q . /sys/kernel/debug/kmemleak; then + echo "Potential memory leaks detected:" + cat /sys/kernel/debug/kmemleak + report_fail "kmemleak_detected" + else + report_pass "kmemleak_no_leaks" + fi + else + echo "kmemleak not available, skipping scan." + fi +} + run () { for module in ${MODULES_LIST}; do # don't insert/remove modules that is already inserted. if ! lsmod | grep "^${module}"; then for num in $(seq "${MODULE_MODPROBE_NUMBER}"); do dmesg -C + mem_before=$(get_mem_usage_kb) report "" "${module}" "insert" "${num}" echo echo "modinfo ${module}" modinfo "${module}" + scan_dmesg_for_errors + report "--remove" "${module}" "remove" "${num}" - dmesg -l 0,1,2,3,4,5 + scan_dmesg_for_errors + + check_module_unloaded "${module}" + mem_after=$(get_mem_usage_kb) + compare_memory_usage "$mem_before" "$mem_after" "$module" done fi done @@ -100,3 +168,4 @@ info_msg "Output directory: ${OUTPUT}" info_msg "About to run load/unload kernel modules ..." get_modules_list run +kmemleak_scan diff --git a/automated/linux/pi-stress/pi-stress.sh b/automated/linux/pi-stress/pi-stress.sh index 6ad0d179f..86a9b9499 100755 --- a/automated/linux/pi-stress/pi-stress.sh +++ b/automated/linux/pi-stress/pi-stress.sh @@ -7,26 +7,31 @@ . ../../lib/sh-test-lib OUTPUT="$(pwd)/output" -LOGFILE="${OUTPUT}/pi-stress.json" +LOGFILE="${OUTPUT}/pi-stress" RESULT_FILE="${OUTPUT}/result.txt" +TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt" export RESULT_FILE DURATION="5m" MLOCKALL="false" RR="false" BACKGROUND_CMD="" +ITERATIONS=1 +USER_BASELINE="" usage() { - echo "Usage: $0 [-D runtime] [-m ] [-r ] [-w background_cmd]" 1>&2 + echo "Usage: $0 [-D runtime] [-m ] [-r ] [-w background_cmd] [-i iterations] [-x user baseline]" 1>&2 exit 1 } -while getopts ":D:m:r:w" opt; do +while getopts ":D:m:r:w:i:x:" opt; do case "${opt}" in D) DURATION="${OPTARG}" ;; m) MLOCKALL="${OPTARG}" ;; r) RR="${OPTARG}" ;; - w) BACKGROUND_CMD="${OPTARG}" ;; + w) BACKGROUND_CMD="${OPTARG}" ;; + i) ITERATIONS="${OPTARG}" ;; + x) USER_BASELINE="${OPTARG}" ;; *) usage ;; esac done @@ -57,10 +62,60 @@ background_process_start bgcmd --cmd "${BACKGROUND_CMD}" # test script. Catch and ignore it with trap. 
trap '' TERM # shellcheck disable=SC2086 -"${binary}" -q --duration "${DURATION}" ${MLOCKALL} ${RR} --json="${LOGFILE}" +for i in $(seq ${ITERATIONS}); do + "${binary}" -q --duration "${DURATION}" ${MLOCKALL} ${RR} --json="${LOGFILE}-${i}.json" +done background_process_stop bgcmd # Parse test log. -../../lib/parse_rt_tests_results.py pi-stress "${LOGFILE}" \ - | tee -a "${RESULT_FILE}" +for i in $(seq ${ITERATIONS}); do + ../../lib/parse_rt_tests_results.py pi-stress "${LOGFILE}-${i}.json" \ + | tee "${TMP_RESULT_FILE}" + + if [ ${ITERATIONS} -ne 1 ]; then + sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}" + fi + cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}" +done + +if [ "${ITERATIONS}" -gt 2 ]; then + max_inversions_file="${OUTPUT}/max_inversions.txt" + + # Extract all inversion values into a file + grep "inversion" "${RESULT_FILE}" | grep "^iteration-" | awk '{ print $(NF-1) }' |tee "${max_inversions_file}" + + if [ ! -s "${max_inversions_file}" ]; then + echo "No inversion values found!" + report_fail "rt-tests-pi-stress" + exit 1 + fi + + # Find the minimum inversion + if [ -n "${USER_BASELINE}" ]; then + max_inversion="${USER_BASELINE}" + echo "Using user-provided user_baseline: ${max_inversion}" + else + max_inversion=$(sort -n "${max_inversions_file}" | tail -n1) + echo "Calculated max_inversion: ${max_inversion}" + fi + + fail_count=0 + while read -r val; do + is_less=$(echo "$val < $max_inversion" | bc -l) + if [ "$is_less" -eq 1 ]; then + fail_count=$((fail_count + 1)) + fi + done < "${max_inversions_file}" + + fail_limit=$((ITERATIONS / 2)) + + echo "Max allowed failures: $fail_limit" + echo "Actual failures: $fail_count" + + if [ "$fail_count" -ge "$fail_limit" ]; then + report_fail "rt-tests-pi-stress" + else + report_pass "rt-tests-pi-stress" + fi +fi diff --git a/automated/linux/pi-stress/pi-stress.yaml b/automated/linux/pi-stress/pi-stress.yaml index 64a5b784f..2215964e2 100644 --- a/automated/linux/pi-stress/pi-stress.yaml +++ b/automated/linux/pi-stress/pi-stress.yaml @@ -40,10 +40,12 @@ params: # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" + ITERATIONS: 1 + USER_BASELINE: "" run: steps: - cd automated/linux/pi-stress - - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -w "${BACKGROUND_CMD}" + - ./pi-stress.sh -D "${DURATION}" -m "${MLOCKALL}" -r "${RR}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" -x "${USER_BASELINE}" - ../../utils/upload-to-artifactorial.sh -a "output/pi-stress.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/pmqtest/pmqtest.sh b/automated/linux/pmqtest/pmqtest.sh index 57ecfba5a..5edd6de89 100755 --- a/automated/linux/pmqtest/pmqtest.sh +++ b/automated/linux/pmqtest/pmqtest.sh @@ -6,20 +6,24 @@ TEST_DIR=$(dirname "$(realpath "$0")") OUTPUT="${TEST_DIR}/output" -LOGFILE="${OUTPUT}/pmqtest.json" +LOGFILE="${OUTPUT}/pmqtest" RESULT_FILE="${OUTPUT}/result.txt" +TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt" + DURATION="5m" BACKGROUND_CMD="" +ITERATIONS=1 usage() { - echo "Usage: $0 [-D duration] [-w background_cmd]" 1>&2 + echo "Usage: $0 [-D duration] [-w background_cmd] [-i iterations]" 1>&2 exit 1 } -while getopts ":D:w:" opt; do +while getopts ":D:w:i:" opt; do case "${opt}" in D) DURATION="${OPTARG}" ;; - w) BACKGROUND_CMD="${OPTARG}" ;; + w) BACKGROUND_CMD="${OPTARG}" ;; + i) ITERATIONS="${OPTARG}" ;; *) usage 
;; esac done @@ -38,10 +42,19 @@ fi background_process_start bgcmd --cmd "${BACKGROUND_CMD}" -"${binary}" -q -S -p 98 -D "${DURATION}" --json="${LOGFILE}" +for i in $(seq ${ITERATIONS}); do + "${binary}" -q -S -p 98 -D "${DURATION}" --json="${LOGFILE}-${i}.json" +done background_process_stop bgcmd # Parse test log. -../../lib/parse_rt_tests_results.py pmqtest "${LOGFILE}" \ - | tee -a "${RESULT_FILE}" +for i in $(seq ${ITERATIONS}); do + ../../lib/parse_rt_tests_results.py pmqtest "${LOGFILE}-${i}.json" \ + | tee "${TMP_RESULT_FILE}" + + if [ ${ITERATIONS} -ne 1 ]; then + sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}" + fi + cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}" +done diff --git a/automated/linux/pmqtest/pmqtest.yaml b/automated/linux/pmqtest/pmqtest.yaml index b23b5437f..0987152ed 100644 --- a/automated/linux/pmqtest/pmqtest.yaml +++ b/automated/linux/pmqtest/pmqtest.yaml @@ -35,10 +35,11 @@ params: # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" + ITERATIONS: 1 run: steps: - cd ./automated/linux/pmqtest/ - - ./pmqtest.sh -D "${DURATION}" -w "${BACKGROUND_CMD}" + - ./pmqtest.sh -D "${DURATION}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" - ../../utils/upload-to-artifactorial.sh -a "output/pmqtest.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.sh b/automated/linux/rt-migrate-test/rt-migrate-test.sh index a11eda147..c3ab0af78 100755 --- a/automated/linux/rt-migrate-test/rt-migrate-test.sh +++ b/automated/linux/rt-migrate-test/rt-migrate-test.sh @@ -5,22 +5,26 @@ . ../../lib/sh-test-lib OUTPUT="$(pwd)/output" -LOGFILE="${OUTPUT}/rt-migrate-test.json" +LOGFILE="${OUTPUT}/rt-migrate-test" RESULT_FILE="${OUTPUT}/result.txt" +TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt" + PRIORITY="51" DURATION="1m" BACKGROUND_CMD="" +ITERATIONS=1 usage() { - echo "Usage: $0 [-p priority] [-D duration] [-w background_cmd]" 1>&2 + echo "Usage: $0 [-p priority] [-D duration] [-w background_cmd] [-i iterations]" 1>&2 exit 1 } -while getopts ":l:p:D:w:" opt; do +while getopts ":l:p:D:w:i:" opt; do case "${opt}" in - p) PRIORITY="${OPTARG}" ;; - D) DURATION="${OPTARG}" ;; - w) BACKGROUND_CMD="${OPTARG}" ;; + p) PRIORITY="${OPTARG}" ;; + D) DURATION="${OPTARG}" ;; + w) BACKGROUND_CMD="${OPTARG}" ;; + i) ITERATIONS="${OPTARG}" ;; *) usage ;; esac done @@ -37,10 +41,20 @@ fi background_process_start bgcmd --cmd "${BACKGROUND_CMD}" -"${binary}" -q -p "${PRIORITY}" -D "${DURATION}" -c --json="${LOGFILE}" +for i in $(seq ${ITERATIONS}); do + "${binary}" -q -p "${PRIORITY}" -D "${DURATION}" -c --json="${LOGFILE}-${i}.json" +done + background_process_stop bgcmd # Parse test log. 
-../../lib/parse_rt_tests_results.py rt-migrate-test "${LOGFILE}" \ - | tee -a "${RESULT_FILE}" +for i in $(seq ${ITERATIONS}); do + ../../lib/parse_rt_tests_results.py rt-migrate-test "${LOGFILE}-${i}.json" \ + | tee "${TMP_RESULT_FILE}" + + if [ ${ITERATIONS} -ne 1 ]; then + sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}" + fi + cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}" +done diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.yaml b/automated/linux/rt-migrate-test/rt-migrate-test.yaml index 8646ef6d5..6674447cb 100644 --- a/automated/linux/rt-migrate-test/rt-migrate-test.yaml +++ b/automated/linux/rt-migrate-test/rt-migrate-test.yaml @@ -37,10 +37,11 @@ params: # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" + ITERATIONS: 1 run: steps: - cd ./automated/linux/rt-migrate-test/ - - ./rt-migrate-test.sh -p "${PRIORITY}" -D "${DURATION}" -w "${BACKGROUND_CMD}" + - ./rt-migrate-test.sh -p "${PRIORITY}" -D "${DURATION}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" - ../../utils/upload-to-artifactorial.sh -a "output/rt-migrate-test.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/signaltest/signaltest.sh b/automated/linux/signaltest/signaltest.sh index e7db4f3a2..578527aa2 100755 --- a/automated/linux/signaltest/signaltest.sh +++ b/automated/linux/signaltest/signaltest.sh @@ -5,25 +5,28 @@ . ../../lib/sh-test-lib OUTPUT="$(pwd)/output" -LOGFILE="${OUTPUT}/signaltest.json" +LOGFILE="${OUTPUT}/signaltest" RESULT_FILE="${OUTPUT}/result.txt" +TMP_RESULT_FILE="${OUTPUT}/tmp_result.txt" PRIORITY="98" THREADS="2" DURATION="1m" BACKGROUND_CMD="" +ITERATIONS=1 usage() { - echo "Usage: $0 [-r runtime] [-p priority] [-t threads] [-w background_cmd]" 1>&2 + echo "Usage: $0 [-r runtime] [-p priority] [-t threads] [-w background_cmd] [-i iterations]" 1>&2 exit 1 } -while getopts ":p:t:D:w:" opt; do +while getopts ":p:t:D:w:i:" opt; do case "${opt}" in p) PRIORITY="${OPTARG}" ;; t) THREADS="${OPTARG}" ;; - D) DURATION="${OPTARG}" ;; - w) BACKGROUND_CMD="${OPTARG}" ;; + D) DURATION="${OPTARG}" ;; + w) BACKGROUND_CMD="${OPTARG}" ;; + i) ITERATIONS="${OPTARG}" ;; *) usage ;; esac done @@ -44,10 +47,19 @@ fi background_process_start bgcmd --cmd "${BACKGROUND_CMD}" -"${binary}" -q -D "${DURATION}" -a -m -p "${PRIORITY}" -t "${THREADS}" --json="${LOGFILE}" +for i in $(seq ${ITERATIONS}); do + "${binary}" -q -D "${DURATION}" -a -m -p "${PRIORITY}" -t "${THREADS}" --json="${LOGFILE}-${i}.json" +done background_process_stop bgcmd # Parse test log. 
-../../lib/parse_rt_tests_results.py signaltest "${LOGFILE}" \ - | tee -a "${RESULT_FILE}" +for i in $(seq ${ITERATIONS}); do + ../../lib/parse_rt_tests_results.py signaltest "${LOGFILE}-${i}.json" \ + | tee "${TMP_RESULT_FILE}" + + if [ ${ITERATIONS} -ne 1 ]; then + sed -i "s|^|iteration-${i}-|g" "${TMP_RESULT_FILE}" + fi + cat "${TMP_RESULT_FILE}" | tee -a "${RESULT_FILE}" +done diff --git a/automated/linux/signaltest/signaltest.yaml b/automated/linux/signaltest/signaltest.yaml index 98bc808d2..6307abcfb 100644 --- a/automated/linux/signaltest/signaltest.yaml +++ b/automated/linux/signaltest/signaltest.yaml @@ -41,10 +41,11 @@ params: # Refer to https://validation.linaro.org/static/docs/v2/publishing-artifacts.html ARTIFACTORIAL_URL: "https://archive.validation.linaro.org/artifacts/team/qa/" ARTIFACTORIAL_TOKEN: "" + ITERATIONS: 1 run: steps: - cd ./automated/linux/signaltest - - ./signaltest.sh -D "${DURATION}" -p "${PRIORITY}" -t "${THREADS}" -w "${BACKGROUND_CMD}" + - ./signaltest.sh -D "${DURATION}" -p "${PRIORITY}" -t "${THREADS}" -i "${ITERATIONS}" -w "${BACKGROUND_CMD}" - ../../utils/upload-to-artifactorial.sh -a "output/signaltest.json" -u "${ARTIFACTORIAL_URL}" -t "${ARTIFACTORIAL_TOKEN}" - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/linux/tcpreplay/pcap/generate_pcap.py b/automated/linux/tcpreplay/pcap/generate_pcap.py new file mode 100755 index 000000000..1666b1b49 --- /dev/null +++ b/automated/linux/tcpreplay/pcap/generate_pcap.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +# vim: set ts=8 sw=4 sts=4 et tw=80 fileencoding=utf-8 : +from scapy.all import ( + Ether, + IP, + TCP, + UDP, + ICMP, + DNS, + DNSQR, + DNSRR, + wrpcap, + Raw, + fragment, +) + + +test_expectations = { + "tcp_basic.pcap": "pass", + "tcp_data.pcap": "pass", + "udp_packet.pcap": "pass", + "icmp_ping.pcap": "pass", + "fragmented.pcap": "pass", + "tcp_rst.pcap": "pass", + "tcp_full_cycle.pcap": "pass", + "dns_query_response.pcap": "pass", + "bad_tcp_flags.pcap": "xfail", + "tcp_multistream.pcap": "pass", + "false_positive_noise.pcap": "pass", + "false_positive_overlap.pcap": "pass", + "false_positive_icmp_flood.pcap": "xfail", +} + + +def tcp_basic(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + tcp_syn = TCP(sport=12345, dport=80, flags="S", seq=1000) + tcp_synack = TCP(sport=80, dport=12345, flags="SA", seq=2000, ack=1001) + tcp_ack = TCP(sport=12345, dport=80, flags="A", seq=1001, ack=2001) + wrpcap( + "pcap/tcp_basic.pcap", + [Ether() / ip / tcp_syn, Ether() / ip / tcp_synack, Ether() / ip / tcp_ack], + ) + + +def tcp_data(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + tcp = TCP(sport=12345, dport=80, flags="PA", seq=1, ack=1) + data = Raw(load="GET / HTTP/1.1\r\nHost: test\r\n\r\n") + wrpcap("pcap/tcp_data.pcap", [Ether() / ip / tcp / data]) + + +def udp_packet(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + udp = UDP(sport=1234, dport=1234) + wrpcap("pcap/udp_packet.pcap", [Ether() / ip / udp / Raw(load="hello")]) + + +def icmp_ping(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + wrpcap( + "pcap/icmp_ping.pcap", + [ + Ether() / ip / ICMP(type="echo-request") / b"ping", + Ether() / ip / ICMP(type="echo-reply") / b"pong", + ], + ) + + +def fragmented(): + pkt = IP(dst="10.0.0.1") / UDP(sport=1111, dport=2222) / Raw(load="X" * 3000) + frags = fragment(pkt, fragsize=500) + wrpcap("pcap/fragmented.pcap", [Ether() / f for f in frags]) + + +def tcp_rst(): + ip = IP(src="10.0.0.2", dst="10.0.0.1") + rst = TCP(sport=12345, dport=80, flags="R", seq=1234) + wrpcap("pcap/tcp_rst.pcap", 
[Ether() / ip / rst]) + + +def tcp_full_cycle(): + eth = Ether(src="00:11:22:33:44:55", dst="66:77:88:99:aa:bb") + ip = IP(src="10.0.0.2", dst="10.0.0.1") + packets = [ + eth / ip / TCP(sport=12345, dport=80, flags="S", seq=1000), + eth / ip / TCP(sport=80, dport=12345, flags="SA", seq=2000, ack=1001), + eth / ip / TCP(sport=12345, dport=80, flags="A", seq=1001, ack=2001), + eth + / ip + / TCP(sport=12345, dport=80, flags="PA", seq=1001, ack=2001) + / b"GET / HTTP/1.1\r\nHost: test\r\n\r\n", + eth + / ip + / TCP(sport=80, dport=12345, flags="PA", seq=2001, ack=1025) + / b"HTTP/1.1 200 OK\r\r\nHi!", + eth / ip / TCP(sport=12345, dport=80, flags="FA", seq=1025, ack=2024), + eth / ip / TCP(sport=80, dport=12345, flags="FA", seq=2024, ack=1026), + eth / ip / TCP(sport=12345, dport=80, flags="A", seq=1026, ack=2025), + ] + wrpcap("pcap/tcp_full_cycle.pcap", packets) + + +def dns_query_response(): + eth = Ether() + ip = IP(src="10.0.0.2", dst="8.8.8.8") + query = UDP(sport=1234, dport=53) / DNS( + id=0xAAAA, qr=0, qd=DNSQR(qname="example.com") + ) + reply = UDP(sport=53, dport=1234) / DNS( + id=0xAAAA, + qr=1, + qd=DNSQR(qname="example.com"), + an=DNSRR(rrname="example.com", rdata="93.184.216.34"), + ) + wrpcap("pcap/dns_query_response.pcap", [eth / ip / query, eth / ip / reply]) + + +def bad_tcp_flags(): + tcp = TCP(sport=1234, dport=80, flags="FPU", seq=1000) + pkt = Ether() / IP(src="10.0.0.2", dst="10.0.0.1") / tcp + pkt[TCP].chksum = 0xFFFF # Force bad checksum + wrpcap("pcap/bad_tcp_flags.pcap", [pkt]) + + +def tcp_multistream(): + eth = Ether() + streams = [] + for i in range(3): + sport = 10000 + i + dst_port = 80 + ip = IP(src="10.0.0.2", dst="10.0.0.1") + syn = TCP(sport=sport, dport=dst_port, flags="S", seq=1000 + i) + ack = TCP(sport=sport, dport=dst_port, flags="A", seq=1001 + i, ack=2001 + i) + data = ( + TCP(sport=sport, dport=dst_port, flags="PA", seq=1001 + i, ack=2001 + i) + / f"GET /stream{i}".encode() + ) + streams.extend([eth / ip / syn, eth / ip / ack, eth / ip / data]) + wrpcap("pcap/tcp_multistream.pcap", streams) + + +def false_positive_noise(): + packets = [] + for i in range(10): + pkt = ( + Ether() + / IP(src=f"192.168.0.{i+10}", dst="10.0.0.1") + / UDP(sport=1234 + i, dport=5678) + / Raw(load="NOISE") + ) + packets.append(pkt) + wrpcap("pcap/false_positive_noise.pcap", packets) + + +def false_positive_overlap(): + packets = [] + for i in range(3): + ip = IP(src=f"10.0.0.{i+3}", dst="10.0.0.1") + tcp = TCP(sport=1000 + i, dport=80, flags="PA", seq=42 + i, ack=1) / Raw( + load=f"benign{i}" + ) + packets.append(Ether() / ip / tcp) + wrpcap("pcap/false_positive_overlap.pcap", packets) + + +def false_positive_icmp_flood(): + packets = [ + Ether() + / IP(src="1.2.3.4", dst="10.0.0.1") + / ICMP(type="echo-request") + / Raw(load="flood") + for _ in range(20) + ] + wrpcap("pcap/false_positive_icmp_flood.pcap", packets) + + +def run_all(): + import os + + os.makedirs("pcap", exist_ok=True) + tcp_basic() + tcp_data() + udp_packet() + icmp_ping() + fragmented() + tcp_rst() + tcp_full_cycle() + dns_query_response() + bad_tcp_flags() + tcp_multistream() + false_positive_noise() + false_positive_overlap() + false_positive_icmp_flood() + print("All .pcap files generated in ./pcap/") + + +if __name__ == "__main__": + run_all() diff --git a/automated/linux/tcpreplay/tcpreplay.py b/automated/linux/tcpreplay/tcpreplay.py new file mode 100755 index 000000000..dd709b84f --- /dev/null +++ b/automated/linux/tcpreplay/tcpreplay.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 +# vim: set ts=8 
sw=4 sts=4 et tw=80 fileencoding=utf-8 : +import argparse +import glob +import fcntl +import os +import shutil +import struct +import subprocess +import sys +import time + + +def check_root(): + if os.geteuid() != 0: + print("SKIP: Must be run as root to create TAP interfaces") + return False + return True + + +def check_tcpreplay(): + if not shutil.which("tcpreplay"): + print("SKIP: tcpreplay not found in PATH") + return False + return True + + +def create_tap_interface(ifname): + try: + IFF_TAP = 0x0002 + IFF_NO_PI = 0x1000 + TUNSETIFF = 0x400454CA + + tap_fd = os.open("/dev/net/tun", os.O_RDWR) + ifr = struct.pack("16sH", ifname.encode(), IFF_TAP | IFF_NO_PI) + fcntl.ioctl(tap_fd, TUNSETIFF, ifr) + return tap_fd + except Exception as e: + print(f"Error creating TAP interface: {e}") + return None + + +def configure_interface(ifname, ipaddr, mask): + try: + subprocess.run(["ip", "link", "set", ifname, "up"], check=True) + subprocess.run( + ["ip", "addr", "add", f"{ipaddr}/{mask}", "dev", ifname], check=True + ) + return True + except subprocess.CalledProcessError as e: + print(f"Failed to configure interface: {e}") + return False + + +def cleanup_interface(ifname): + try: + subprocess.run(["ip", "link", "set", ifname, "down"], check=True) + print("cleanup_interface: pass") + except subprocess.CalledProcessError: + print("cleanup_interface: fail") + + +def run_tcpreplay(ifname, pcap): + try: + subprocess.run(["tcpreplay", "--intf1", ifname, pcap], check=True) + print("run_tcpreplay: pass") + return True + except subprocess.CalledProcessError: + print("run_tcpreplay: fail") + return False + + +def lava_report(name, result, output_file=None): + line = f"{name}: {result}" + print(line) + if output_file: + os.makedirs(os.path.dirname(output_file), exist_ok=True) + with open(output_file, "a") as f: + f.write(line + "\n") + + +def get_expectation(test_name, default_expectations): + return default_expectations.get(test_name, "pass") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--interface", required=True) + parser.add_argument("--ipaddr", required=True) + parser.add_argument("--mask", default="24") + parser.add_argument("--pcap-dir", required=True) + parser.add_argument("--output", required=True) + global args + args = parser.parse_args() + + if not check_root(): + lava_report("check_root", "skip", args.output) + return + + if not check_tcpreplay(): + lava_report("check_tcpreplay", "skip", args.output) + return + + tap_fd = create_tap_interface(args.interface) + if not tap_fd: + lava_report("create_tap_interface", "fail", args.output) + return + + if not configure_interface(args.interface, args.ipaddr, args.mask): + lava_report("configure_interface", "fail", args.output) + os.close(tap_fd) + return + + default_expectations = { + "tcp_basic": "pass", + "tcp_data": "pass", + "udp_packet": "pass", + "icmp_ping": "pass", + "fragmented": "pass", + "tcp_rst": "pass", + "tcp_full_cycle": "pass", + "dns_query_response": "pass", + "bad_tcp_flags": "xfail", + "tcp_multistream": "pass", + "false_positive_noise": "pass", + "false_positive_overlap": "xfail", + "false_positive_icmp_flood": "xfail", + } + + pcaps = sorted(glob.glob(os.path.join(args.pcap_dir, "*.pcap"))) + for pcap_path in pcaps: + pcap = os.path.basename(pcap_path) + test_name = os.path.splitext(pcap)[0] + expected = get_expectation(test_name, default_expectations) + + try: + success = run_tcpreplay(args.interface, pcap_path) + except Exception as e: + print(f"Exception during tcpreplay for {test_name}: {e}") + 
success = False + + # Normalize output as requested + if expected == "xfail": + lava_report(f"run_{test_name}", "pass", args.output) + elif success: + lava_report(f"run_{test_name}", "pass", args.output) + else: + lava_report(f"run_{test_name}", "fail", args.output) + + cleanup_interface(args.interface) + os.close(tap_fd) + + +if __name__ == "__main__": + main() diff --git a/automated/linux/tcpreplay/tcpreplay.yaml b/automated/linux/tcpreplay/tcpreplay.yaml new file mode 100644 index 000000000..febc400b2 --- /dev/null +++ b/automated/linux/tcpreplay/tcpreplay.yaml @@ -0,0 +1,34 @@ +metadata: + format: Lava-Test Test Definition 1.0 + name: tcpreplay + description: Replay a PCAP file using tcpreplay and verify via TUN interface + maintainer: + - anders.roxell@linaro.org + os: + - debian + - ubuntu + - centos + - fedora + - openembedded + scope: + - functional + devices: + - juno + - x86 + +params: + # Interface to replay traffic on + INTERFACE: "tun0" + # Path to PCAP directory + PCAP: "./pcap/" + # IP address to assign to the TUN interface + IPADDR: "10.0.0.1" + # Netmask + MASK: "24" + +run: + steps: + - cd automated/linux/tcpreplay/ + - python3 pcap/generate_pcap.py + - python3 tcpreplay.py --interface "${INTERFACE}" --ipaddr "${IPADDR}" --mask "${MASK}" --pcap-dir "${PCAP}" --output ./output/result.txt + - ../../utils/send-to-lava.sh ./output/result.txt diff --git a/automated/utils/send-to-lava.sh b/automated/utils/send-to-lava.sh index 36d678e62..933354751 100755 --- a/automated/utils/send-to-lava.sh +++ b/automated/utils/send-to-lava.sh @@ -1,6 +1,29 @@ #!/bin/sh RESULT_FILE="$1" +RESULT_DIR="$2" +if [ -z "${RESULT_DIR}" ]; then + RESULT_DIR="$(dirname "${RESULT_FILE}")" +fi + +signal() { + if [ -z ${KMSG} ] + then + echo "${1}" + else + echo "<0>${1}" > /dev/kmsg + fi +} + +show_output() { + test_name="$1" + test_output="${RESULT_DIR}/${test_name}.log" + if [ -r "$test_output" ]; then + signal "" + cat "$test_output" + signal "" + fi +} command -v lava-test-case > /dev/null 2>&1 lava_test_case="$?" 
@@ -13,6 +36,7 @@ if [ -f "${RESULT_FILE}" ]; then test="${line%% *}" result="${line##* }" + show_output "${test}" if [ "${lava_test_case}" -eq 0 ]; then lava-test-case "${test}" --result "${result}" else @@ -24,6 +48,7 @@ if [ -f "${RESULT_FILE}" ]; then measurement="$(echo "${line}" | awk '{print $3}')" units="$(echo "${line}" | awk '{print $4}')" + show_output "${test}" if [ "${lava_test_case}" -eq 0 ]; then if [ -n "${units}" ]; then lava-test-case "${test}" --result "${result}" --measurement "${measurement}" --units "${units}" @@ -40,9 +65,9 @@ if [ -f "${RESULT_FILE}" ]; then lava-test-set "${test_set_status}" "${test_set_name}" else if [ "${test_set_status}" = "start" ]; then - echo "" + signal "" else - echo "" + signal "" fi fi fi diff --git a/automated/utils/test-runner.py b/automated/utils/test-runner.py index d49bef623..751833201 100755 --- a/automated/utils/test-runner.py +++ b/automated/utils/test-runner.py @@ -16,7 +16,6 @@ import time from uuid import uuid4 from datetime import datetime -from distutils.spawn import find_executable try: @@ -844,7 +843,7 @@ def __init__(self, test, args): self.results["version"] = test_version.rstrip() os.chdir(path) self.lava_run = args.lava_run - if self.lava_run and not find_executable("lava-test-case"): + if self.lava_run and not shutil.which("lava-test-case"): self.logger.info( "lava-test-case not found, '-l' or '--lava_run' option ignored'" ) diff --git a/automated/utils/upload-to-squad.sh b/automated/utils/upload-to-squad.sh index 4f3ad8d4b..9dc8c9e03 100755 --- a/automated/utils/upload-to-squad.sh +++ b/automated/utils/upload-to-squad.sh @@ -63,7 +63,7 @@ if command -v lava-test-reference > /dev/null 2>&1; then i=1 while [ $i -le "${RETRY_COUNT}" ]; do # response is the squad testrun id when succeed - response=$(curl ${CURL_VERBOSE_FLAG} --header "Auth-Token: ${SQUAD_ARCHIVE_SUBMIT_TOKEN}" --form "attachment=@${ATTACHMENT}" "${ARTIFACTORIAL_URL}") + response=$(curl ${CURL_VERBOSE_FLAG} --header "Authorization: token ${SQUAD_ARCHIVE_SUBMIT_TOKEN}" --form "attachment=@${ATTACHMENT}" "${ARTIFACTORIAL_URL}") # generate the SQUAD url for download and report pass when uploading succeed if echo "${response}" | grep -E "^[0-9]+$"; then diff --git a/mkdocs_plugin/pyproject.toml b/mkdocs_plugin/pyproject.toml new file mode 100644 index 000000000..bc8a4c7af --- /dev/null +++ b/mkdocs_plugin/pyproject.toml @@ -0,0 +1,30 @@ +[build-system] +requires = [ + "setuptools", + "mkdocs>=1.1", + "tags-macros-plugin@git+https://github.com/mwasilew/mkdocs-plugin-tags.git" +] +build-backend = "setuptools.build_meta" + +[project] +name = "mkdocs-test-definitions-plugin" +version = "1.5" +keywords = [ + "mkdocs", + "python", + "markdown", + "wiki" +] +requires-python = ">=3.5" +license = "GPL-2.0-or-later" + +authors = [ + {name = "Milosz Wasilewski", email = "milosz.wasilewski@oss.qualcomm.com"} +] + +[project.urls] +Repository = "https://github.com/linaro/test-definitions" +GitHub = "https://github.com/linaro/test-definitions" + +[project.entry-points."mkdocs.plugins"] +linaro-test-definitions = "testdefinitionsmkdocs:LinaroTestDefinitionsMkDocsPlugin" diff --git a/mkdocs_plugin/setup.py b/mkdocs_plugin/setup.py deleted file mode 100644 index 6820658d0..000000000 --- a/mkdocs_plugin/setup.py +++ /dev/null @@ -1,33 +0,0 @@ -from setuptools import setup, find_packages - - -setup( - name="mkdocs-test-definitions-plugin", - version="1.5", - description="An MkDocs plugin that converts LAVA test definitions to documentation", - long_description="", - 
keywords="mkdocs python markdown wiki", - url="https://github.com/linaro/test-definitions", - author="Milosz Wasilewski", - author_email="milosz.wasilewski@linaro.org", - license="GPL", - python_requires=">=3.5", - install_requires=["mkdocs>=1.1", "tags-macros-plugin"], - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "Intended Audience :: Information Technology", - "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", - "Programming Language :: Python", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - ], - packages=find_packages(), - entry_points={ - "mkdocs.plugins": [ - "linaro-test-definitions = testdefinitionsmkdocs:LinaroTestDefinitionsMkDocsPlugin" - ] - }, -) diff --git a/sanity-check.sh b/sanity-check.sh index 1a2fa3e4a..3f5c794d1 100755 --- a/sanity-check.sh +++ b/sanity-check.sh @@ -4,7 +4,8 @@ set -ex python3 validate.py \ -r build-error.txt \ -p E501 W503 E203 \ - -s SC1091 SC2230 SC3043 + -s SC1091 SC2230 SC3043 \ + -l warning # pycodestyle checks skipped: # E501: line too long @@ -18,3 +19,8 @@ python3 validate.py \ # Reason: 'which' is widely used and supported. And 'command' applets isn't # available in busybox, refer to https://busybox.net/downloads/BusyBox.html # SC2230: which is non-standard. Use builtin 'command -v' instead. + +# SC3043: In POSIC 'local' is undefined but it's widely used. For this reason +# it is acceptable to use 'local' in this repository + +# "warning" is the default severity level for shellcheck diff --git a/validate.py b/validate.py index bac8cbfea..aa44eb1af 100755 --- a/validate.py +++ b/validate.py @@ -221,7 +221,7 @@ def validate_yaml(filename, args): return 0 -def validate_shell(filename, ignore_options): +def validate_shell(filename, args): ignore_string = "" if args.shellcheck_ignore is not None: # Exclude types of warnings in the following format: @@ -229,6 +229,7 @@ def validate_shell(filename, ignore_options): ignore_string = "-e %s" % ",".join(args.shellcheck_ignore) if len(ignore_string) < 4: # contains only "-e " ignore_string = "" + ignore_string = "-S %s %s" % (args.shellcheck_level, ignore_string) cmd = "shellcheck %s" % ignore_string return validate_external(cmd, filename, "SHELLCHECK", args) @@ -341,6 +342,14 @@ def main(args): help="Space separated list of shellcheck exclusions", dest="shellcheck_ignore", ) + parser.add_argument( + "-l", + "--shellcheck-level", + default="warning", + help="Shellcheck level set with -S", + dest="shellcheck_level", + ) + parser.add_argument( "-g", "--git-latest",