Merge pull request #7448 from sylvestre/selinux2

selinux: run the GNU test too
Daniel Hofstetter 2025-03-17 15:01:33 +01:00 committed by GitHub
commit e6461269fc
9 changed files with 409 additions and 110 deletions

View file

@@ -267,6 +267,10 @@ jobs:
- uses: dtolnay/rust-toolchain@stable
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
- name: Install/setup prerequisites
shell: bash
run: |
sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
- name: Run sccache-cache
uses: mozilla-actions/sccache-action@v0.0.8
- name: "`make build`"
@@ -402,7 +406,7 @@ jobs:
run: |
## Install dependencies
sudo apt-get update
sudo apt-get install jq
sudo apt-get install jq libselinux1-dev
- name: "`make install`"
shell: bash
run: |
@@ -847,6 +851,7 @@ jobs:
- name: Install/setup prerequisites
shell: bash
run: |
sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
## Install/setup prerequisites
make prepare-busytest
- name: Run BusyBox test suite
@@ -930,16 +935,19 @@ jobs:
- uses: Swatinem/rust-cache@v2
- name: Run sccache-cache
uses: mozilla-actions/sccache-action@v0.0.8
- name: Install/setup prerequisites
shell: bash
run: |
sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
- name: Build coreutils as multiple binaries
shell: bash
run: |
## Build individual uutil binaries
set -v
make
- name: Install/setup prerequisites
- name: Run toybox src
shell: bash
run: |
## Install/setup prerequisites
make toybox-src
- name: Run Toybox test suite
id: summary

View file

@@ -55,12 +55,19 @@ jobs:
#
SUITE_LOG_FILE="${path_GNU_tests}/test-suite.log"
ROOT_SUITE_LOG_FILE="${path_GNU_tests}/test-suite-root.log"
SELINUX_SUITE_LOG_FILE="${path_GNU_tests}/selinux-test-suite.log"
SELINUX_ROOT_SUITE_LOG_FILE="${path_GNU_tests}/selinux-test-suite-root.log"
TEST_LOGS_GLOB="${path_GNU_tests}/**/*.log" ## note: not usable at bash CLI; [why] double globstar not enabled by default b/c MacOS includes only bash v3 which doesn't have double globstar support
TEST_FILESET_PREFIX='test-fileset-IDs.sha1#'
TEST_FILESET_SUFFIX='.txt'
TEST_SUMMARY_FILE='gnu-result.json'
TEST_FULL_SUMMARY_FILE='gnu-full-result.json'
outputs SUITE_LOG_FILE ROOT_SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE
TEST_ROOT_FULL_SUMMARY_FILE='gnu-root-full-result.json'
TEST_SELINUX_FULL_SUMMARY_FILE='selinux-gnu-full-result.json'
TEST_SELINUX_ROOT_FULL_SUMMARY_FILE='selinux-root-gnu-full-result.json'
AGGREGATED_SUMMARY_FILE='aggregated-result.json'
outputs SUITE_LOG_FILE ROOT_SUITE_LOG_FILE SELINUX_SUITE_LOG_FILE SELINUX_ROOT_SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE TEST_ROOT_FULL_SUMMARY_FILE TEST_SELINUX_FULL_SUMMARY_FILE TEST_SELINUX_ROOT_FULL_SUMMARY_FILE AGGREGATED_SUMMARY_FILE
- name: Checkout code (uutil)
uses: actions/checkout@v4
with:
@@ -82,6 +89,44 @@ jobs:
submodules: false
persist-credentials: false
- name: Selinux - Setup Lima
uses: lima-vm/lima-actions/setup@v1
id: lima-actions-setup
- name: Selinux - Cache ~/.cache/lima
uses: actions/cache@v4
with:
path: ~/.cache/lima
key: lima-${{ steps.lima-actions-setup.outputs.version }}
- name: Selinux - Start Fedora VM with SELinux
run: limactl start --plain --name=default --cpus=4 --disk=40 --memory=8 --network=lima:user-v2 template://fedora
- name: Selinux - Setup SSH
uses: lima-vm/lima-actions/ssh@v1
- name: Selinux - Verify SELinux Status and Configuration
run: |
lima getenforce
lima ls -laZ /etc/selinux
lima sudo sestatus
# Ensure we're running in enforcing mode
lima sudo setenforce 1
lima getenforce
# Create test files with SELinux contexts for testing
lima sudo mkdir -p /var/test_selinux
lima sudo touch /var/test_selinux/test_file
lima sudo chcon -t etc_t /var/test_selinux/test_file
lima ls -Z /var/test_selinux/test_file # Verify context
- name: Selinux - Install dependencies in VM
run: |
lima sudo dnf -y update
lima sudo dnf -y install git autoconf autopoint bison texinfo gperf gcc g++ gdb jq libacl-devel libattr-devel libcap-devel libselinux-devel attr rustup clang-devel texinfo-tex wget automake patch quilt
lima rustup-init -y --default-toolchain stable
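Note: commands prefixed with lima run inside the guest (lima is Lima's shortcut for limactl shell default); wrapping a command in lima bash -c "..." is needed whenever cd, globs, or redirections must be evaluated guest-side rather than on the runner. A minimal sketch:
lima uname -a                      # executes in the Fedora VM
lima bash -c "cd ~/work && ls"     # shell syntax evaluated inside the guest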
- name: Override submodule URL and initialize submodules
# Use GitHub instead of the upstream git server
run: |
@@ -125,12 +170,68 @@ jobs:
sudo update-locale
echo "After:"
locale -a
- name: Selinux - Copy the sources to VM
run: |
rsync -a -e ssh . lima-default:~/work/
- name: Build binaries
shell: bash
run: |
## Build binaries
cd '${{ steps.vars.outputs.path_UUTILS }}'
bash util/build-gnu.sh --release-build
- name: Selinux - Generate selinux tests list
run: |
# Find and list all tests that require SELinux
lima bash -c "cd ~/work/gnu/ && grep -l 'require_selinux_' -r tests/ > ~/work/uutils/selinux-tests.txt"
lima bash -c "cd ~/work/uutils/ && cat selinux-tests.txt"
# Count the tests
lima bash -c "cd ~/work/uutils/ && echo 'Found SELinux tests:'; wc -l selinux-tests.txt"
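The discovery step can be reproduced locally against a GNU coreutils checkout (a sketch; paths are illustrative):
cd gnu/
grep -l 'require_selinux_' -r tests/ > ../uutils/selinux-tests.txt
wc -l ../uutils/selinux-tests.txt   # how many tests require SELinux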
- name: Selinux - Build for selinux tests
run: |
lima bash -c "cd ~/work/uutils/ && bash util/build-gnu.sh"
lima bash -c "mkdir -p ~/work/gnu/tests-selinux/"
- name: Selinux - Run selinux tests
run: |
lima sudo setenforce 1
lima getenforce
lima cat /proc/filesystems
lima bash -c "cd ~/work/uutils/ && bash util/run-gnu-test.sh \$(cat selinux-tests.txt)"
- name: Selinux - Extract testing info from individual logs into JSON
shell: bash
run: |
lima bash -c "cd ~/work/gnu/ && python3 ../uutils/util/gnu-json-result.py tests > ~/work/gnu/tests-selinux/${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }}"
- name: Selinux/root - Run selinux tests
run: |
lima bash -c "cd ~/work/uutils/ && CI=1 bash util/run-gnu-test.sh run-root \$(cat selinux-tests.txt)"
- name: Selinux/root - Extract testing info from individual logs into JSON
shell: bash
run: |
lima bash -c "cd ~/work/gnu/ && python3 ../uutils/util/gnu-json-result.py tests > ~/work/gnu/tests-selinux/${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }}"
- name: Selinux - Collect test logs and test results
run: |
mkdir -p ${{ steps.vars.outputs.path_GNU_tests }}-selinux
# Copy the test logs from the Lima VM to the host
lima bash -c "cp ~/work/gnu/tests/test-suite.log ~/work/gnu/tests-selinux/ || echo 'No test-suite.log found'"
lima bash -c "cp ~/work/gnu/tests/test-suite-root.log ~/work/gnu/tests-selinux/ || echo 'No test-suite-root.log found'"
rsync -v -a -e ssh lima-default:~/work/gnu/tests-selinux/ ./${{ steps.vars.outputs.path_GNU_tests }}-selinux/
# Copy SELinux logs to the main test directory for integrated processing
cp -f ${{ steps.vars.outputs.path_GNU_tests }}-selinux/test-suite.log ${{ steps.vars.outputs.path_GNU_tests }}/selinux-test-suite.log
cp -f ${{ steps.vars.outputs.path_GNU_tests }}-selinux/test-suite-root.log ${{ steps.vars.outputs.path_GNU_tests }}/selinux-test-suite-root.log
cp -f ${{ steps.vars.outputs.path_GNU_tests }}-selinux/${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }} .
cp -f ${{ steps.vars.outputs.path_GNU_tests }}-selinux/${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }} .
- name: Run GNU tests
shell: bash
run: |
@@ -138,6 +239,13 @@ jobs:
path_GNU='${{ steps.vars.outputs.path_GNU }}'
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
bash "${path_UUTILS}/util/run-gnu-test.sh"
- name: Extract testing info from individual logs into JSON
shell: bash
run: |
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
python ${path_UUTILS}/util/gnu-json-result.py ${{ steps.vars.outputs.path_GNU_tests }} > ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
- name: Run GNU root tests
shell: bash
run: |
@@ -145,35 +253,40 @@ jobs:
path_GNU='${{ steps.vars.outputs.path_GNU }}'
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
bash "${path_UUTILS}/util/run-gnu-test.sh" run-root
- name: Extract testing info into JSON
- name: Extract testing info from individual logs (run as root) into JSON
shell: bash
run: |
## Extract testing info into JSON
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
python ${path_UUTILS}/util/gnu-json-result.py ${{ steps.vars.outputs.path_GNU_tests }} > ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
python ${path_UUTILS}/util/gnu-json-result.py ${{ steps.vars.outputs.path_GNU_tests }} > ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }}
- name: Extract/summarize testing info
id: summary
shell: bash
run: |
## Extract/summarize testing info
outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; }
#
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
#
SUITE_LOG_FILE='${{ steps.vars.outputs.SUITE_LOG_FILE }}'
ROOT_SUITE_LOG_FILE='${{ steps.vars.outputs.ROOT_SUITE_LOG_FILE }}'
ls -al ${SUITE_LOG_FILE} ${ROOT_SUITE_LOG_FILE}
if test -f "${SUITE_LOG_FILE}"
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
# Check if the file exists
if test -f "${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}"
then
source ${path_UUTILS}/util/analyze-gnu-results.sh ${SUITE_LOG_FILE} ${ROOT_SUITE_LOG_FILE}
# Look at all individual results and summarize
eval $(python3 ${path_UUTILS}/util/analyze-gnu-results.py -o=${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }})
if [[ "$TOTAL" -eq 0 || "$TOTAL" -eq 1 ]]; then
echo "::error ::Failed to parse test results from '${SUITE_LOG_FILE}'; failing early"
echo "::error ::Failed to parse test results from '${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}'; failing early"
exit 1
fi
output="GNU tests summary = TOTAL: $TOTAL / PASS: $PASS / FAIL: $FAIL / ERROR: $ERROR / SKIP: $SKIP"
echo "${output}"
if [[ "$FAIL" -gt 0 || "$ERROR" -gt 0 ]]; then echo "::warning ::${output}" ; fi
if [[ "$FAIL" -gt 0 || "$ERROR" -gt 0 ]]; then
echo "::warning ::${output}"
fi
jq -n \
--arg date "$(date --rfc-email)" \
--arg sha "$GITHUB_SHA" \
@@ -187,9 +300,10 @@ jobs:
HASH=$(sha1sum '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' | cut --delim=" " -f 1)
outputs HASH
else
echo "::error ::Failed to find summary of test results (missing '${SUITE_LOG_FILE}'); failing early"
echo "::error ::Failed to find summary of test results (missing '${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}'); failing early"
exit 1
fi
# Compress logs before upload (the upload fails otherwise)
gzip ${{ steps.vars.outputs.TEST_LOGS_GLOB }}
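For reference, the summary step above works because analyze-gnu-results.py prints export statements that eval turns into shell variables; a minimal sketch of the round trip, using the summary file names defined in the vars step:
eval $(python3 util/analyze-gnu-results.py -o=aggregated-result.json \
  gnu-full-result.json gnu-root-full-result.json \
  selinux-gnu-full-result.json selinux-root-gnu-full-result.json)
echo "TOTAL=$TOTAL PASS=$PASS FAIL=$FAIL ERROR=$ERROR SKIP=$SKIP"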
- name: Reserve SHA1/ID of 'test-summary'
@@ -212,6 +326,26 @@ jobs:
with:
name: gnu-full-result.json
path: ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
- name: Upload root json results
uses: actions/upload-artifact@v4
with:
name: gnu-root-full-result.json
path: ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }}
- name: Upload selinux json results
uses: actions/upload-artifact@v4
with:
name: selinux-gnu-full-result.json
path: ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }}
- name: Upload selinux root json results
uses: actions/upload-artifact@v4
with:
name: selinux-root-gnu-full-result.json
path: ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }}
- name: Upload aggregated json results
uses: actions/upload-artifact@v4
with:
name: aggregated-result.json
path: ${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }}
- name: Compare test failures VS reference
shell: bash
run: |
@@ -219,7 +353,11 @@ jobs:
have_new_failures=""
REF_LOG_FILE='${{ steps.vars.outputs.path_reference }}/test-logs/test-suite.log'
ROOT_REF_LOG_FILE='${{ steps.vars.outputs.path_reference }}/test-logs/test-suite-root.log'
SELINUX_REF_LOG_FILE='${{ steps.vars.outputs.path_reference }}/test-logs/selinux-test-suite.log'
SELINUX_ROOT_REF_LOG_FILE='${{ steps.vars.outputs.path_reference }}/test-logs/selinux-test-suite-root.log'
REF_SUMMARY_FILE='${{ steps.vars.outputs.path_reference }}/test-summary/gnu-result.json'
REPO_DEFAULT_BRANCH='${{ steps.vars.outputs.repo_default_branch }}'
path_UUTILS='${{ steps.vars.outputs.path_UUTILS }}'
# https://github.com/uutils/coreutils/issues/4294

View file

@@ -143,6 +143,7 @@ whitespace
wordlist
wordlists
xattrs
xpass
# * abbreviations
consts

View file

@@ -57,11 +57,16 @@ TOYBOX_ROOT := $(BASEDIR)/tmp
TOYBOX_VER := 0.8.12
TOYBOX_SRC := $(TOYBOX_ROOT)/toybox-$(TOYBOX_VER)
ifeq ($(SELINUX_ENABLED),)
SELINUX_ENABLED := 0
ifdef SELINUX_ENABLED
override SELINUX_ENABLED := 0
# Now check if we should enable it (only on non-Windows)
ifneq ($(OS),Windows_NT)
ifeq ($(shell /sbin/selinuxenabled 2>/dev/null ; echo $$?),0)
SELINUX_ENABLED := 1
ifeq ($(shell if [ -x /sbin/selinuxenabled ] && /sbin/selinuxenabled 2>/dev/null; then echo 0; else echo 1; fi),0)
override SELINUX_ENABLED := 1
$(info /sbin/selinuxenabled successful)
else
$(info SELINUX_ENABLED=1 but /sbin/selinuxenabled failed)
endif
endif
endif
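For reference, the $(shell ...) probe above amounts to this shell check (a sketch; /sbin/selinuxenabled exits 0 only when SELinux is enabled on the host):
if [ -x /sbin/selinuxenabled ] && /sbin/selinuxenabled 2>/dev/null; then
  echo "SELinux available: SELINUX_ENABLED=1"
else
  echo "SELinux unavailable: SELINUX_ENABLED=0"
fi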
@@ -176,9 +181,7 @@ SELINUX_PROGS := \
ifneq ($(OS),Windows_NT)
PROGS := $(PROGS) $(UNIX_PROGS)
endif
ifeq ($(SELINUX_ENABLED),1)
# Build the selinux command even if not on the system
PROGS := $(PROGS) $(SELINUX_PROGS)
endif
@@ -265,6 +268,7 @@ TEST_SPEC_FEATURE := test_unimplemented
else ifeq ($(SELINUX_ENABLED),1)
TEST_NO_FAIL_FAST :=
TEST_SPEC_FEATURE := feat_selinux
BUILD_SPEC_FEATURE := feat_selinux
endif
define TEST_BUSYBOX
@@ -288,11 +292,15 @@ use_default := 1
build-pkgs:
ifneq (${MULTICALL}, y)
ifdef BUILD_SPEC_FEATURE
${CARGO} build ${CARGOFLAGS} --features "$(BUILD_SPEC_FEATURE)" ${PROFILE_CMD} $(foreach pkg,$(EXES),-p uu_$(pkg))
else
${CARGO} build ${CARGOFLAGS} ${PROFILE_CMD} $(foreach pkg,$(EXES),-p uu_$(pkg))
endif
endif
build-coreutils:
${CARGO} build ${CARGOFLAGS} --features "${EXES}" ${PROFILE_CMD} --no-default-features
${CARGO} build ${CARGOFLAGS} --features "${EXES} $(BUILD_SPEC_FEATURE)" ${PROFILE_CMD} --no-default-features
build: build-coreutils build-pkgs
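A hedged usage sketch of these targets: per the override logic above, requesting the feature on the command line is honored only when the selinuxenabled probe succeeds, otherwise it is forced back off.
make SELINUX_ENABLED=1 build   # request the SELinux feature; reset to 0 if the probe fails
make build                     # plain build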

util/analyze-gnu-results.py Normal file (184 lines added)
View file

@@ -0,0 +1,184 @@
#!/usr/bin/env python3
"""
GNU Test Results Analyzer and Aggregator
This script analyzes and aggregates test results from the GNU test suite.
It parses JSON files containing test results (PASS/FAIL/SKIP/ERROR) and:
1. Counts the number of tests in each result category
2. Can aggregate results from multiple JSON files with priority ordering
3. Outputs shell export statements for use in GitHub Actions workflows
Priority order for aggregation (highest to lowest):
- PASS: Takes precedence over all other results (best outcome)
- FAIL: Takes precedence over ERROR and SKIP
- ERROR: Takes precedence over SKIP
- SKIP: Lowest priority
Usage:
- Single file:
python analyze-gnu-results.py test-results.json
- Multiple files (with aggregation):
python analyze-gnu-results.py file1.json file2.json
- With output file for aggregated results:
python analyze-gnu-results.py -o=output.json file1.json file2.json
Output:
Prints shell export statements for TOTAL, PASS, FAIL, SKIP, XPASS, and ERROR
that can be evaluated in a shell environment.
"""
import json
import sys
from collections import defaultdict
def get_priority(result):
"""Return a priority value for result status (lower is higher priority)"""
priorities = {
"PASS": 0, # PASS is highest priority (best result)
"FAIL": 1, # FAIL is second priority
"ERROR": 2, # ERROR is third priority
"SKIP": 3, # SKIP is lowest priority
}
return priorities.get(result, 4) # Unknown states have lowest priority
def aggregate_results(json_files):
"""
Aggregate test results from multiple JSON files.
Prioritizes results in the order: PASS > FAIL > ERROR > SKIP
"""
# Combined results dictionary
combined_results = defaultdict(dict)
# Process each JSON file
for json_file in json_files:
try:
with open(json_file, "r") as f:
data = json.load(f)
# For each utility and its tests
for utility, tests in data.items():
for test_name, result in tests.items():
# If this test hasn't been seen yet, add it
if test_name not in combined_results[utility]:
combined_results[utility][test_name] = result
else:
# If it has been seen, apply priority rules
current_priority = get_priority(
combined_results[utility][test_name]
)
new_priority = get_priority(result)
# Lower priority value means higher precedence
if new_priority < current_priority:
combined_results[utility][test_name] = result
except FileNotFoundError:
print(f"Warning: File '{json_file}' not found.", file=sys.stderr)
continue
except json.JSONDecodeError:
print(f"Warning: '{json_file}' is not a valid JSON file.", file=sys.stderr)
continue
return combined_results
def analyze_test_results(json_data):
"""
Analyze test results from GNU test suite JSON data.
Counts PASS, FAIL, SKIP, ERROR, and XPASS results for all tests.
"""
# Counters for test results
total_tests = 0
pass_count = 0
fail_count = 0
skip_count = 0
xpass_count = 0  # Rarely present in the JSON data; counted when it appears
error_count = 0  # Rarely present in the JSON data; counted when it appears
# Analyze each utility's tests
for utility, tests in json_data.items():
for test_name, result in tests.items():
total_tests += 1
match result:
case "PASS":
pass_count += 1
case "FAIL":
fail_count += 1
case "SKIP":
skip_count += 1
case "ERROR":
error_count += 1
case "XPASS":
xpass_count += 1
# Return the statistics
return {
"TOTAL": total_tests,
"PASS": pass_count,
"FAIL": fail_count,
"SKIP": skip_count,
"XPASS": xpass_count,
"ERROR": error_count,
}
def main():
"""
Main function to process JSON files and export variables.
Supports both single file analysis and multi-file aggregation.
"""
# Check if file arguments were provided
if len(sys.argv) < 2:
print("Usage: python analyze-gnu-results.py <json> [json ...]")
print(" For multiple files, results will be aggregated")
print(" Priority SKIP > ERROR > FAIL > PASS")
sys.exit(1)
json_files = sys.argv[1:]
output_file = None
# Check if the first argument specifies an output file (starts with -o=)
if json_files[0].startswith("-o="):
output_file = json_files[0][3:]
json_files = json_files[1:]
# Process the files
if len(json_files) == 1:
# Single file analysis
try:
with open(json_files[0], "r") as file:
json_data = json.load(file)
results = analyze_test_results(json_data)
except FileNotFoundError:
print(f"Error: File '{json_files[0]}' not found.", file=sys.stderr)
sys.exit(1)
except json.JSONDecodeError:
print(
f"Error: '{json_files[0]}' is not a valid JSON file.", file=sys.stderr
)
sys.exit(1)
else:
# Multiple files - aggregate them
json_data = aggregate_results(json_files)
results = analyze_test_results(json_data)
# Save aggregated data if output file is specified
if output_file:
with open(output_file, "w") as f:
json.dump(json_data, f, indent=2)
# Print export statements for shell evaluation
print(f"export TOTAL={results['TOTAL']}")
print(f"export PASS={results['PASS']}")
print(f"export SKIP={results['SKIP']}")
print(f"export FAIL={results['FAIL']}")
print(f"export XPASS={results['XPASS']}")
print(f"export ERROR={results['ERROR']}")
if __name__ == "__main__":
main()
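A hedged end-to-end sketch of the aggregation priority (file names and contents are illustrative; the two-level shape, utility -> test -> result, matches what the analyzer iterates over):
cat > a.json <<'EOF'
{"tests/cp": {"no-ctx.log": "SKIP", "basic.log": "PASS"}}
EOF
cat > b.json <<'EOF'
{"tests/cp": {"no-ctx.log": "PASS"}}
EOF
eval $(python3 util/analyze-gnu-results.py -o=merged.json a.json b.json)
echo "$TOTAL $PASS $SKIP"   # prints: 2 2 0 -- PASS from b.json outranks SKIP from a.json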

View file

@@ -1,79 +0,0 @@
#!/usr/bin/env bash
# spell-checker:ignore xpass XPASS testsuite
set -e
# As we do two builds (with and without root), we need to do some trivial maths
# to present the merge results
# this script will export the values in the term
if test $# -ne 2; then
echo "syntax:"
echo "$0 testsuite.log root-testsuite.log"
fi
SUITE_LOG_FILE=$1
ROOT_SUITE_LOG_FILE=$2
if test ! -f "${SUITE_LOG_FILE}"; then
echo "${SUITE_LOG_FILE} has not been found"
exit 1
fi
if test ! -f "${ROOT_SUITE_LOG_FILE}"; then
echo "${ROOT_SUITE_LOG_FILE} has not been found"
exit 1
fi
function get_total {
# Total of tests executed
# They are the normal number of tests as they are skipped in the normal run
NON_ROOT=$(sed -n "s/.*# TOTAL: \(.*\)/\1/p" "${SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
echo $NON_ROOT
}
function get_pass {
# This is the sum of the two test suites.
# In the normal run, they are SKIP
NON_ROOT=$(sed -n "s/.*# PASS: \(.*\)/\1/p" "${SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
AS_ROOT=$(sed -n "s/.*# PASS: \(.*\)/\1/p" "${ROOT_SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
echo $((NON_ROOT + AS_ROOT))
}
function get_skip {
# As some of the tests executed as root as still SKIP (ex: selinux), we
# need to some maths:
# Number of tests skip as user - total test as root + skipped as root
TOTAL_AS_ROOT=$(sed -n "s/.*# TOTAL: \(.*\)/\1/p" "${ROOT_SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
NON_ROOT=$(sed -n "s/.*# SKIP: \(.*\)/\1/p" "${SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
AS_ROOT=$(sed -n "s/.*# SKIP: \(.*\)/\1/p" "${ROOT_SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
echo $((NON_ROOT - TOTAL_AS_ROOT + AS_ROOT))
}
function get_fail {
# They used to be SKIP, now they fail (this is a good news)
NON_ROOT=$(sed -n "s/.*# FAIL: \(.*\)/\1/p" "${SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
AS_ROOT=$(sed -n "s/.*# FAIL: \(.*\)/\1/p" "${ROOT_SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
echo $((NON_ROOT + AS_ROOT))
}
function get_xpass {
NON_ROOT=$(sed -n "s/.*# XPASS: \(.*\)/\1/p" "${SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
echo $NON_ROOT
}
function get_error {
# They used to be SKIP, now they error (this is a good news)
NON_ROOT=$(sed -n "s/.*# ERROR: \(.*\)/\1/p" "${SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
AS_ROOT=$(sed -n "s/.*# ERROR:: \(.*\)/\1/p" "${ROOT_SUITE_LOG_FILE}" | tr -d '\r' | head -n1)
echo $((NON_ROOT + AS_ROOT))
}
# we don't need the return codes indeed, ignore them
# shellcheck disable=SC2155
{
export TOTAL=$(get_total)
export PASS=$(get_pass)
export SKIP=$(get_skip)
export FAIL=$(get_fail)
export XPASS=$(get_xpass)
export ERROR=$(get_error)
}

View file

@@ -240,6 +240,10 @@ sed -i "s/ {ERR_SUBST=>\"s\/(unrecognized|unknown) option \[-' \]\*foobar\[' \]
# Remove the check whether a util was built. Otherwise tests against utils like "arch" are not run.
sed -i "s|require_built_ |# require_built_ |g" init.cfg
# Exit early in the selinux check; the first probe is enough for us.
sed -i "s|# Independent of whether SELinux|return 0\n #|g" init.cfg
# Some tests are executed with the "nobody" user.
# The check to verify if it works is based on the GNU coreutils version
# making it too restrictive for us
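A hedged demo of that sed patch on a stand-in file (demo.cfg and its lines are hypothetical; GNU sed expands \n in the replacement to a newline, injecting an early return before the rest of the check):
printf '%s\n' 'check_one' '# Independent of whether SELinux works' 'check_two' > demo.cfg
sed -i "s|# Independent of whether SELinux|return 0\n #|g" demo.cfg
cat demo.cfg   # check_one / return 0 /  # works / check_two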

View file

@@ -9,7 +9,16 @@ from pathlib import Path
out = {}
if len(sys.argv) != 2:
print("Usage: python gnu-json-result.py <gnu_test_directory>")
sys.exit(1)
test_dir = Path(sys.argv[1])
if not test_dir.is_dir():
print(f"Directory {test_dir} does not exist.")
sys.exit(1)
# Test all the logs from the test execution
for filepath in test_dir.glob("**/*.log"):
path = Path(filepath)
current = out
@@ -25,7 +34,8 @@ for filepath in test_dir.glob("**/*.log"):
)
if result:
current[path.name] = result.group(1)
except:
pass
except Exception as e:
print(f"Error processing file {path}: {e}", file=sys.stderr)
print(json.dumps(out, indent=2, sort_keys=True))
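Typical invocation (a sketch; the argument is the GNU tests tree produced by the workflow above, and the output file name follows the vars step):
python3 util/gnu-json-result.py path/to/gnu/tests > gnu-full-result.json
The output is nested JSON keyed by directory and log file name, mapping each *.log to its extracted result (PASS/FAIL/SKIP/ERROR/XPASS).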

View file

@@ -43,7 +43,27 @@ cd "${path_GNU}" && echo "[ pwd:'${PWD}' ]"
export RUST_BACKTRACE=1
if test "$1" != "run-root"; then
# Determine if we have SELinux tests
has_selinux_tests=false
if test $# -ge 1; then
for t in "$@"; do
if [[ "$t" == *"selinux"* ]]; then
has_selinux_tests=true
break
fi
done
fi
if [[ "$1" == "run-root" && "$has_selinux_tests" == true ]]; then
# Handle SELinux root tests separately
shift
if test -n "$CI"; then
echo "Running SELinux tests as root"
# Don't use check-root here as the upstream root test list is hardcoded
sudo "${MAKE}" -j "$("${NPROC}")" check TESTS="$*" SUBDIRS=. RUN_EXPENSIVE_TESTS=yes RUN_VERY_EXPENSIVE_TESTS=yes VERBOSE=no gl_public_submodule_commit="" srcdir="${path_GNU}" TEST_SUITE_LOG="tests/test-suite-root.log" || :
fi
exit 0
elif test "$1" != "run-root"; then
if test $# -ge 1; then
# if set, run only the tests passed
SPECIFIC_TESTS=""
@@ -82,8 +102,13 @@ else
# in case we would like to run tests requiring root
if test -z "$1" -o "$1" == "run-root"; then
if test -n "$CI"; then
echo "Running check-root to run only root tests"
sudo "${MAKE}" -j "$("${NPROC}")" check-root SUBDIRS=. RUN_EXPENSIVE_TESTS=yes RUN_VERY_EXPENSIVE_TESTS=yes VERBOSE=no gl_public_submodule_commit="" srcdir="${path_GNU}" TEST_SUITE_LOG="tests/test-suite-root.log" || :
if test $# -ge 2; then
echo "Running check-root to run only root tests"
sudo "${MAKE}" -j "$("${NPROC}")" check-root TESTS="$2" SUBDIRS=. RUN_EXPENSIVE_TESTS=yes RUN_VERY_EXPENSIVE_TESTS=yes VERBOSE=no gl_public_submodule_commit="" srcdir="${path_GNU}" TEST_SUITE_LOG="tests/test-suite-root.log" || :
else
echo "Running check-root to run only root tests"
sudo "${MAKE}" -j "$("${NPROC}")" check-root SUBDIRS=. RUN_EXPENSIVE_TESTS=yes RUN_VERY_EXPENSIVE_TESTS=yes VERBOSE=no gl_public_submodule_commit="" srcdir="${path_GNU}" TEST_SUITE_LOG="tests/test-suite-root.log" || :
fi
fi
fi
fi
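Taken together, the script now supports four invocation modes (a sketch; the test path is illustrative, and selinux-tests.txt is the list generated in the workflow above):
bash util/run-gnu-test.sh                                        # full non-root suite
bash util/run-gnu-test.sh tests/cp/basic.sh                      # only the listed tests
CI=1 bash util/run-gnu-test.sh run-root                          # root-only tests
CI=1 bash util/run-gnu-test.sh run-root $(cat selinux-tests.txt) # SELinux tests as root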