Mirror of https://github.com/uutils/coreutils.git (synced 2025-07-07 21:45:01 +00:00)
Look at all individual results and summarize
commit 2f872860f0 (parent 59426b779e)

4 changed files with 154 additions and 31 deletions
.github/workflows/CICD.yml (vendored): 8 changes

@@ -935,17 +935,19 @@ jobs:
       - uses: Swatinem/rust-cache@v2
       - name: Run sccache-cache
         uses: mozilla-actions/sccache-action@v0.0.8
+      - name: Install/setup prerequisites
+        shell: bash
+        run: |
+          sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
       - name: Build coreutils as multiple binaries
         shell: bash
         run: |
           ## Build individual uutil binaries
           set -v
           make
-      - name: Install/setup prerequisites
+      - name: Run toybox src
         shell: bash
         run: |
-          sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev
-          ## Install/setup prerequisites
           make toybox-src
       - name: Run Toybox test suite
         id: summary
.github/workflows/GnuTests.yml (vendored): 16 changes

@@ -65,8 +65,9 @@ jobs:
          TEST_ROOT_FULL_SUMMARY_FILE='gnu-root-full-result.json'
          TEST_SELINUX_FULL_SUMMARY_FILE='selinux-gnu-full-result.json'
          TEST_SELINUX_ROOT_FULL_SUMMARY_FILE='selinux-root-gnu-full-result.json'
+         AGGREGATED_SUMMARY_FILE='aggregated-result.json'

-         outputs SUITE_LOG_FILE ROOT_SUITE_LOG_FILE SELINUX_SUITE_LOG_FILE SELINUX_ROOT_SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE TEST_ROOT_FULL_SUMMARY_FILE TEST_SELINUX_FULL_SUMMARY_FILE TEST_SELINUX_ROOT_FULL_SUMMARY_FILE
+         outputs SUITE_LOG_FILE ROOT_SUITE_LOG_FILE SELINUX_SUITE_LOG_FILE SELINUX_ROOT_SUITE_LOG_FILE TEST_FILESET_PREFIX TEST_FILESET_SUFFIX TEST_LOGS_GLOB TEST_SUMMARY_FILE TEST_FULL_SUMMARY_FILE TEST_ROOT_FULL_SUMMARY_FILE TEST_SELINUX_FULL_SUMMARY_FILE TEST_SELINUX_ROOT_FULL_SUMMARY_FILE AGGREGATED_SUMMARY_FILE
      - name: Checkout code (uutil)
        uses: actions/checkout@v4
        with:
@@ -272,7 +273,7 @@ jobs:
          if test -f "${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}"
          then
            # Look at all individual results and summarize
-           eval $(python3 ${path_UUTILS}/util/analyze-gnu-results.py ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }})
+           eval $(python3 ${path_UUTILS}/util/analyze-gnu-results.py -o=${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }} ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }})

            if [[ "$TOTAL" -eq 0 || "$TOTAL" -eq 1 ]]; then
              echo "::error ::Failed to parse test results from '${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}'; failing early"
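The eval line above relies on a simple contract: analyze-gnu-results.py prints one "export NAME=value" line per counter to stdout, and eval $(...) turns those lines into the shell variables (TOTAL, PASS, FAIL, SKIP, XPASS, ERROR) that the TOTAL sanity check in the same step reads. A minimal sketch of that contract, assuming it runs from a repository checkout; the sample data, test names, and temporary file are illustrative only:

# Sketch: feed one tiny result file to the analyzer and show the
# "export NAME=value" lines that the workflow later evals.
import json
import subprocess
import tempfile

# Hypothetical results: one passing and one failing test for ls.
sample = {"ls": {"tests/ls/sample-a": "PASS", "tests/ls/sample-b": "FAIL"}}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(sample, f)
    sample_path = f.name

exports = subprocess.run(
    ["python3", "util/analyze-gnu-results.py", sample_path],
    capture_output=True,
    text=True,
    check=True,
).stdout
print(exports)
# Expected shape of the output:
#   export TOTAL=2
#   export PASS=1
#   export SKIP=0
#   export FAIL=1
#   export XPASS=0
#   export ERROR=0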
@@ -325,21 +326,26 @@ jobs:
        with:
          name: gnu-full-result.json
          path: ${{ steps.vars.outputs.TEST_FULL_SUMMARY_FILE }}
-     - name: Upload full json results
+     - name: Upload root json results
        uses: actions/upload-artifact@v4
        with:
          name: gnu-root-full-result.json
          path: ${{ steps.vars.outputs.TEST_ROOT_FULL_SUMMARY_FILE }}
-     - name: Upload full json results
+     - name: Upload selinux json results
        uses: actions/upload-artifact@v4
        with:
          name: selinux-gnu-full-result.json
          path: ${{ steps.vars.outputs.TEST_SELINUX_FULL_SUMMARY_FILE }}
-     - name: Upload full json results
+     - name: Upload selinux root json results
        uses: actions/upload-artifact@v4
        with:
          name: selinux-root-gnu-full-result.json
          path: ${{ steps.vars.outputs.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }}
+     - name: Upload aggregated json results
+       uses: actions/upload-artifact@v4
+       with:
+         name: aggregated-result.json
+         path: ${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }}
      - name: Compare test failures VS reference
        shell: bash
        run: |
(spell-checker wordlist)

@@ -142,6 +142,7 @@ whitespace
 wordlist
 wordlists
 xattrs
+xpass

 # * abbreviations
 consts
util/analyze-gnu-results.py

@@ -1,15 +1,102 @@
 #!/usr/bin/env python3

+"""
+GNU Test Results Analyzer and Aggregator
+
+This script analyzes and aggregates test results from the GNU test suite.
+It parses JSON files containing test results (PASS/FAIL/SKIP/ERROR) and:
+1. Counts the number of tests in each result category
+2. Can aggregate results from multiple JSON files with priority ordering
+3. Outputs shell export statements for use in GitHub Actions workflows
+
+Priority order for aggregation (highest to lowest):
+- PASS: Takes precedence over all other results (best outcome)
+- FAIL: Takes precedence over ERROR and SKIP
+- ERROR: Takes precedence over SKIP
+- SKIP: Lowest priority
+
+Usage:
+  - Single file:
+    python analyze-gnu-results.py test-results.json
+
+  - Multiple files (with aggregation):
+    python analyze-gnu-results.py file1.json file2.json
+
+  - With output file for aggregated results:
+    python analyze-gnu-results.py -o=output.json file1.json file2.json
+
+Output:
+  Prints shell export statements for TOTAL, PASS, FAIL, SKIP, XPASS, and ERROR
+  that can be evaluated in a shell environment.
+"""
 import json
 import sys
+from collections import defaultdict
+
+
+def get_priority(result):
+    """Return a priority value for result status (lower is higher priority)"""
+    priorities = {
+        "PASS": 0,  # PASS is highest priority (best result)
+        "FAIL": 1,  # FAIL is second priority
+        "ERROR": 2,  # ERROR is third priority
+        "SKIP": 3,  # SKIP is lowest priority
+    }
+    return priorities.get(result, 4)  # Unknown states have lowest priority
+
+
+def aggregate_results(json_files):
+    """
+    Aggregate test results from multiple JSON files.
+    Prioritizes results in the order: PASS > FAIL > ERROR > SKIP
"""
|
||||
# Combined results dictionary
|
||||
combined_results = defaultdict(dict)
|
||||
|
||||
# Process each JSON file
|
||||
for json_file in json_files:
|
||||
try:
|
||||
with open(json_file, "r") as f:
|
||||
data = json.load(f)
|
||||
|
||||
# For each utility and its tests
|
||||
for utility, tests in data.items():
|
||||
for test_name, result in tests.items():
|
||||
# If this test hasn't been seen yet, add it
|
||||
if test_name not in combined_results[utility]:
|
||||
combined_results[utility][test_name] = result
|
||||
else:
|
||||
# If it has been seen, apply priority rules
|
||||
current_priority = get_priority(
|
||||
combined_results[utility][test_name]
|
||||
)
|
||||
new_priority = get_priority(result)
|
||||
|
||||
# Lower priority value means higher precedence
|
||||
if new_priority < current_priority:
|
||||
combined_results[utility][test_name] = result
|
||||
except FileNotFoundError:
|
||||
print(f"Warning: File '{json_file}' not found.", file=sys.stderr)
|
||||
continue
|
||||
except json.JSONDecodeError:
|
||||
print(f"Warning: '{json_file}' is not a valid JSON file.", file=sys.stderr)
|
||||
continue
|
||||
|
||||
return combined_results
|
||||
|
||||
|
||||
def analyze_test_results(json_data):
|
||||
"""
|
||||
Analyze test results from GNU test suite JSON data.
|
||||
Counts PASS, FAIL, SKIP results for all tests.
|
||||
"""
|
||||
# Counters for test results
|
||||
total_tests = 0
|
||||
pass_count = 0
|
||||
fail_count = 0
|
||||
skip_count = 0
|
||||
error_count = 0 # Although not in the JSON, included for compatibility
|
||||
xpass_count = 0 # Not in JSON data but included for compatibility
|
||||
error_count = 0 # Not in JSON data but included for compatibility
|
||||
|
||||
# Analyze each utility's tests
|
||||
for utility, tests in json_data.items():
|
||||
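To make the precedence concrete: when several result files report the same test, the value whose get_priority number is lowest wins, so a PASS from any one run overrides a FAIL, ERROR, or SKIP from the others. A small sketch of that rule, assuming get_priority and aggregate_results from the diff above are in scope; the file contents and test name are made up:

import json
import tempfile

# The same test fails in the ordinary run but passes in the root run.
run_user = {"tail": {"tests/tail/sample-follow": "FAIL"}}
run_root = {"tail": {"tests/tail/sample-follow": "PASS"}}

paths = []
for data in (run_user, run_root):
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(data, f)
        paths.append(f.name)

merged = aggregate_results(paths)
print(merged["tail"]["tests/tail/sample-follow"])  # PASS, the best result wins

This is why the aggregate is a fair summary across the plain, root, and SELinux runs: a test only counts as failed if no variant of the suite passed it.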
@@ -22,6 +109,10 @@ def analyze_test_results(json_data):
                 fail_count += 1
             elif result == "SKIP":
                 skip_count += 1
+            elif result == "ERROR":
+                error_count += 1
+            elif result == "XPASS":
+                xpass_count += 1

     # Return the statistics
     return {
@@ -29,40 +120,63 @@ def analyze_test_results(json_data):
"PASS": pass_count,
|
||||
"FAIL": fail_count,
|
||||
"SKIP": skip_count,
|
||||
"XPASS": xpass_count,
|
||||
"ERROR": error_count,
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
# Check if a file argument was provided
|
||||
if len(sys.argv) != 2:
|
||||
print("Usage: python script.py <json_file>")
|
||||
"""
|
||||
Main function to process JSON files and export variables.
|
||||
Supports both single file analysis and multi-file aggregation.
|
||||
"""
|
||||
# Check if file arguments were provided
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: python analyze-gnu-results.py <json> [json ...]")
|
||||
print(" For multiple files, results will be aggregated")
|
||||
print(" Priority SKIP > ERROR > FAIL > PASS")
|
||||
         sys.exit(1)

-    json_file = sys.argv[1]
+    json_files = sys.argv[1:]
+    output_file = None

-    try:
-        # Parse the JSON data from the specified file
-        with open(json_file, "r") as file:
-            json_data = json.load(file)
+    # Check if the first argument is an output file (starts with -)
+    if json_files[0].startswith("-o="):
+        output_file = json_files[0][3:]
+        json_files = json_files[1:]

-        # Analyze the results
-        results = analyze_test_results(json_data)
+    # Process the files
+    if len(json_files) == 1:
+        # Single file analysis
+        try:
+            with open(json_files[0], "r") as file:
+                json_data = json.load(file)
+            results = analyze_test_results(json_data)
+        except FileNotFoundError:
+            print(f"Error: File '{json_files[0]}' not found.", file=sys.stderr)
+            sys.exit(1)
+        except json.JSONDecodeError:
+            print(
+                f"Error: '{json_files[0]}' is not a valid JSON file.", file=sys.stderr
+            )
+            sys.exit(1)
+    else:
+        # Multiple files - aggregate them
+        json_data = aggregate_results(json_files)
+        results = analyze_test_results(json_data)

-        # Export the results as environment variables
-        # For use in shell, print export statements
-        print(f"export TOTAL={results['TOTAL']}")
-        print(f"export PASS={results['PASS']}")
-        print(f"export SKIP={results['SKIP']}")
-        print(f"export FAIL={results['FAIL']}")
-        print(f"export ERROR={results['ERROR']}")
+    # Save aggregated data if output file is specified
+    if output_file:
+        with open(output_file, "w") as f:
+            json.dump(json_data, f, indent=2)

-    except FileNotFoundError:
-        print(f"Error: File '{json_file}' not found.", file=sys.stderr)
-        sys.exit(1)
-    except json.JSONDecodeError:
-        print(f"Error: '{json_file}' is not a valid JSON", file=sys.stderr)
-        sys.exit(1)
+    # Print export statements for shell evaluation
+    print(f"export TOTAL={results['TOTAL']}")
+    print(f"export PASS={results['PASS']}")
+    print(f"export SKIP={results['SKIP']}")
+    print(f"export FAIL={results['FAIL']}")
+    print(f"export XPASS={results['XPASS']}")
+    print(f"export ERROR={results['ERROR']}")


 if __name__ == "__main__":
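End to end, the workflow invokes the script once with -o= and the four per-run summaries; the merged per-test dictionary is written to the aggregated file for the upload step, and the counters are printed as export statements. A sketch of an equivalent local invocation, assuming the four result files from the workflow are present in the current directory:

import json
import subprocess

# The four per-run summaries produced by the GnuTests workflow.
files = [
    "gnu-full-result.json",
    "gnu-root-full-result.json",
    "selinux-gnu-full-result.json",
    "selinux-root-gnu-full-result.json",
]

exports = subprocess.run(
    ["python3", "util/analyze-gnu-results.py", "-o=aggregated-result.json", *files],
    capture_output=True,
    text=True,
    check=True,
).stdout
print(exports)  # export TOTAL=... PASS=... SKIP=... FAIL=... XPASS=... ERROR=...

# The aggregated per-test results are also persisted for the artifact upload.
with open("aggregated-result.json") as f:
    aggregated = json.load(f)
print(len(aggregated), "utilities aggregated")

Missing inputs are tolerated: aggregate_results warns on stderr and skips files it cannot read, so the summary degrades gracefully when, say, the SELinux runs did not produce results.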