#!/bin/bash
# Test Validation Script
# Checks TDD compliance, test coverage, and test execution

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
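# (PROJECT_ROOT is derived from this script's own location - the parent of the
# directory containing the script - so validation works from any working directory.)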
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
TASK_ID=""

# Help function
show_help() {
    echo "Usage: $0 <task-id>"
    echo ""
    echo "Validates test compliance for a specific task:"
    echo "  - TDD compliance (tests written before code)"
    echo "  - Test coverage and execution"
    echo "  - Test file organization"
    echo ""
    echo "Examples:"
    echo "  $0 15    # Validate task 15"
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            show_help
            exit 0
            ;;
        -*)
            echo -e "${RED}Error: Unknown option $1${NC}"
            show_help
            exit 1
            ;;
        *)
            if [[ -z "$TASK_ID" ]]; then
                TASK_ID="$1"
            else
                echo -e "${RED}Error: Multiple task IDs specified${NC}"
                exit 1
            fi
            shift
            ;;
    esac
done

# Validate task ID
if [[ -z "$TASK_ID" ]]; then
    echo -e "${RED}Error: Task ID is required${NC}"
    show_help
    exit 1
fi

# Function to check if a file is a test file
is_test_file() {
    local file="$1"
    [[ "$file" == *"test"* ]] && [[ "$file" == *.py ]]
}

# Function to check if a file is source code
is_source_file() {
    local file="$1"
    [[ "$file" == *.py ]] && [[ "$file" != *"__init__.py" ]] && [[ "$file" != *"test"* ]]
}
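
# Usage sketch for the helpers above (paths are hypothetical):
#   is_test_file "tests/test_parser.py"    # -> true
#   is_source_file "src/parser.py"         # -> true
#   is_source_file "src/__init__.py"       # -> false (package markers excluded)
# Both classify by filename only; they never inspect file contents.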

# Function to analyze test coverage for a specific task
analyze_task_test_coverage() {
    local task_id="$1"
    local test_files=()
    local source_files=()
    local coverage_issues=()

    echo -e "${BLUE}📊 Test Coverage Analysis for Task $task_id:${NC}"

    # Find test files
    while IFS= read -r -d '' file; do
        if is_test_file "$file"; then
            test_files+=("$file")
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)
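    # (find -print0 paired with `IFS= read -r -d ''` streams NUL-delimited
    # paths, so filenames containing spaces or newlines are handled safely.)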

    # Find source files
    while IFS= read -r -d '' file; do
        if is_source_file "$file"; then
            source_files+=("$file")
        fi
    done < <(find "$PROJECT_ROOT/src" -name "*.py" -print0)

    echo -e "  ${BLUE}• Test files found:${NC} ${#test_files[@]}"
    echo -e "  ${BLUE}• Source files found:${NC} ${#source_files[@]}"

    # Check if we have tests
    if [[ ${#test_files[@]} -eq 0 ]]; then
        echo -e "  ${RED}❌ No test files found in tests/ directory${NC}"
        coverage_issues+=("no_tests")
        return 1
    fi

    # Check test file organization
    echo -e "${BLUE}📁 Test File Organization:${NC}"
    for test_file in "${test_files[@]}"; do
        local relative_path="${test_file#"$PROJECT_ROOT"/}"
        echo -e "  ${GREEN}✅ $relative_path${NC}"
    done
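    # (The ${test_file#"$PROJECT_ROOT"/} expansion strips the project-root
    # prefix for display; quoting the prefix keeps it literal, not a glob.)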

    # Check if tests are passing
    echo -e "${BLUE}🧪 Test Execution Status:${NC}"
    if command -v pytest >/dev/null 2>&1; then
        echo -e "  ${BLUE}Running tests...${NC}"
        # Run pytest in a subshell so the cd does not leak into the caller
        if (cd "$PROJECT_ROOT" && python -m pytest --tb=short -q); then
            echo -e "  ${GREEN}✅ All tests passing${NC}"
        else
            echo -e "  ${RED}❌ Some tests are failing${NC}"
            coverage_issues+=("tests_failing")
            return 1
        fi
    else
        echo -e "  ${YELLOW}⚠️  pytest not available - skipping test execution${NC}"
        coverage_issues+=("pytest_unavailable")
    fi

    return 0
}

# Function to check TDD compliance
check_tdd_compliance() {
    local task_id="$1"

    echo -e "${BLUE}🔄 TDD Compliance Check:${NC}"

    # Check that tests exist before implementation
    local test_files_count
    test_files_count=$(find "$PROJECT_ROOT/tests" -name "*.py" | wc -l)

    if [[ $test_files_count -eq 0 ]]; then
        echo -e "  ${RED}❌ No test files found - violates TDD principle${NC}"
        echo -e "  ${YELLOW}   TDD requires tests to be written before implementation${NC}"
        return 1
    fi

    # Check test file naming conventions
    local test_naming_issues=0
    while IFS= read -r -d '' file; do
        local filename
        filename=$(basename "$file")
        if [[ ! "$filename" =~ ^test_.*\.py$ ]] && [[ ! "$filename" =~ _test\.py$ ]]; then
            echo -e "  ${YELLOW}⚠️  Test file naming: $filename (should start with 'test_' or end with '_test.py')${NC}"
            test_naming_issues=$((test_naming_issues + 1))
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)
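    # (Note: helper files such as conftest.py or tests/__init__.py match
    # neither pattern and will also be flagged; treat those warnings as advisory.)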

    if [[ $test_naming_issues -eq 0 ]]; then
        echo -e "  ${GREEN}✅ Test file naming conventions followed${NC}"
    fi

    # Check for test imports and structure
    local test_structure_issues=0
    while IFS= read -r -d '' file; do
        if ! grep -q "import.*test\|from.*test\|pytest\|unittest" "$file" 2>/dev/null; then
            echo -e "  ${YELLOW}⚠️  Test file structure: $file (missing test framework imports)${NC}"
            test_structure_issues=$((test_structure_issues + 1))
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)

    if [[ $test_structure_issues -eq 0 ]]; then
        echo -e "  ${GREEN}✅ Test file structure looks good${NC}"
    fi

    echo -e "  ${GREEN}✅ TDD compliance check passed${NC}"
    return 0
}

# Function to provide test improvement recommendations
suggest_test_improvements() {
    local task_id="$1"

    echo -e "${BLUE}💡 Test Improvement Recommendations:${NC}"

    # Check for common test patterns
    local test_files=()
    while IFS= read -r -d '' file; do
        if is_test_file "$file"; then
            test_files+=("$file")
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)

    if [[ ${#test_files[@]} -eq 0 ]]; then
        echo -e "  ${YELLOW}• Create test files for all source modules${NC}"
        echo -e "  ${YELLOW}• Start with basic functionality tests${NC}"
        echo -e "  ${YELLOW}• Use pytest for modern Python testing${NC}"
        return
    fi

    # Analyze test patterns
    for test_file in "${test_files[@]}"; do
        # grep -c prints a count (including 0) but exits non-zero when the
        # count is 0, so `|| echo 0` would append a second "0" line; use
        # `|| true` and default the value instead.
        local test_functions test_classes
        test_functions=$(grep -c "^def test_" "$test_file" 2>/dev/null || true)
        test_classes=$(grep -c "^class Test" "$test_file" 2>/dev/null || true)

        if [[ ${test_functions:-0} -eq 0 ]] && [[ ${test_classes:-0} -eq 0 ]]; then
            echo -e "  ${YELLOW}• Add test functions to $test_file${NC}"
        fi

        # Check for edge case coverage
        if ! grep -q "test.*error\|test.*exception\|test.*edge\|test.*boundary" "$test_file" 2>/dev/null; then
            echo -e "  ${YELLOW}• Consider adding edge case tests to $test_file${NC}"
        fi
    done

    # General recommendations
    echo -e "  ${YELLOW}• Ensure tests cover both success and failure scenarios${NC}"
    echo -e "  ${YELLOW}• Use descriptive test names that explain the scenario${NC}"
    echo -e "  ${YELLOW}• Mock external dependencies to isolate unit tests${NC}"
    echo -e "  ${YELLOW}• Aim for high test coverage (80%+)${NC}"
}

# Main function
main() {
    local exit_code=0
    local tdd_compliance=0
    local test_coverage=0

    echo -e "${BLUE}🧪 Test Validation for Task $TASK_ID${NC}"
    echo -e "${BLUE}Focus: TDD compliance, test coverage, and execution${NC}"
    echo ""

    # Check TDD compliance
    if ! check_tdd_compliance "$TASK_ID"; then
        exit_code=1
        tdd_compliance=1
    fi
    echo ""

    # Analyze test coverage
    if ! analyze_task_test_coverage "$TASK_ID"; then
        exit_code=1
        test_coverage=1
    fi
    echo ""

    # Provide improvement recommendations
    suggest_test_improvements "$TASK_ID"
    echo ""

    # Summary
    echo -e "${BLUE}📋 Test Validation Summary:${NC}"
    if [[ $tdd_compliance -eq 0 ]]; then
        echo -e "  ${GREEN}✅ TDD compliance: PASSED${NC}"
    else
        echo -e "  ${RED}❌ TDD compliance: FAILED${NC}"
    fi

    if [[ $test_coverage -eq 0 ]]; then
        echo -e "  ${GREEN}✅ Test coverage: PASSED${NC}"
    else
        echo -e "  ${RED}❌ Test coverage: FAILED${NC}"
    fi

    if [[ $exit_code -eq 0 ]]; then
        echo ""
        echo -e "${GREEN}🎉 All test validation checks passed!${NC}"
        echo -e "${GREEN}✅ Task $TASK_ID meets test quality standards${NC}"
    else
        echo ""
        echo -e "${RED}🚫 Test validation failed - must be resolved before task completion${NC}"
        echo -e "${YELLOW}   Fix all test issues before proceeding${NC}"
    fi

    echo ""
    echo -e "${BLUE}🔧 Quick Fix Commands:${NC}"
    echo -e "  ${BLUE}• Run tests:${NC} uv run pytest"
    echo -e "  ${BLUE}• Run with coverage:${NC} uv run pytest --cov=src"
    echo -e "  ${BLUE}• Re-validate:${NC} $0 $TASK_ID"

    exit $exit_code
}

# Run main function
main "$@"
|