#!/bin/bash
# Test Validation Script
# Checks TDD compliance, test coverage, and test execution
#
# Usage: test_validation.sh <task_id>
# Exits 0 when all checks pass, 1 otherwise.

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
# Repo root is one directory above this script's location.
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
TASK_ID=""

#######################################
# Print usage information.
# Outputs: help text to stdout
#######################################
show_help() {
    echo "Usage: $0 <task_id>"
    echo ""
    echo "Validates test compliance for a specific task:"
    echo " - TDD compliance (tests written before code)"
    echo " - Test coverage and execution"
    echo " - Test file organization"
    echo ""
    echo "Examples:"
    echo " $0 15 # Validate task 15"
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --help)
            show_help
            exit 0
            ;;
        -*)
            echo -e "${RED}Error: Unknown option $1${NC}"
            show_help
            exit 1
            ;;
        *)
            if [[ -z "$TASK_ID" ]]; then
                TASK_ID="$1"
            else
                echo -e "${RED}Error: Multiple task IDs specified${NC}"
                exit 1
            fi
            shift
            ;;
    esac
done

# Validate task ID
if [[ -z "$TASK_ID" ]]; then
    echo -e "${RED}Error: Task ID is required${NC}"
    show_help
    exit 1
fi

#######################################
# Check whether a path looks like a Python test file.
# Arguments: $1 - file path
# Returns: 0 if the name contains "test" and ends in .py
#######################################
is_test_file() {
    local file="$1"
    [[ "$file" == *"test"* ]] && [[ "$file" == *.py ]]
}

#######################################
# Check whether a path looks like a Python source (non-test) file.
# Arguments: $1 - file path
# Returns: 0 for .py files that are neither __init__.py nor tests
#######################################
is_source_file() {
    local file="$1"
    [[ "$file" == *.py ]] && [[ "$file" != *"__init__.py" ]] && [[ "$file" != *"test"* ]]
}

#######################################
# Analyze test coverage for a specific task: enumerate test/source
# files, report organization, and run pytest if available.
# Globals:   PROJECT_ROOT (read)
# Arguments: $1 - task id (used for display only)
# Outputs:   human-readable report to stdout
# Returns:   0 on success, 1 if no tests exist or tests fail
#######################################
analyze_task_test_coverage() {
    local task_id="$1"
    local test_files=()
    local source_files=()
    local file

    echo -e "${BLUE}📊 Test Coverage Analysis for Task $task_id:${NC}"

    # Find test files (NUL-delimited so paths with spaces survive)
    while IFS= read -r -d '' file; do
        if is_test_file "$file"; then
            test_files+=("$file")
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)

    # Find source files
    while IFS= read -r -d '' file; do
        if is_source_file "$file"; then
            source_files+=("$file")
        fi
    done < <(find "$PROJECT_ROOT/src" -name "*.py" -print0)

    echo -e " ${BLUE}• Test files found:${NC} ${#test_files[@]}"
    echo -e " ${BLUE}• Source files found:${NC} ${#source_files[@]}"

    # Check if we have tests
    if [[ ${#test_files[@]} -eq 0 ]]; then
        echo -e " ${RED}❌ No test files found in tests/ directory${NC}"
        return 1
    fi

    # Check test file organization
    echo -e "${BLUE}📁 Test File Organization:${NC}"
    local test_file relative_path
    for test_file in "${test_files[@]}"; do
        # Quote the pattern so glob chars in PROJECT_ROOT are literal (SC2295).
        relative_path="${test_file#"$PROJECT_ROOT"/}"
        echo -e " ${GREEN}✅ $relative_path${NC}"
    done

    # Check if tests are passing
    echo -e "${BLUE}🧪 Test Execution Status:${NC}"
    if command -v pytest >/dev/null 2>&1; then
        echo -e " ${BLUE}Running tests...${NC}"
        # Subshell keeps the cd from leaking into the rest of the script.
        if (cd "$PROJECT_ROOT" && python -m pytest --tb=short -q); then
            echo -e " ${GREEN}✅ All tests passing${NC}"
        else
            echo -e " ${RED}❌ Some tests are failing${NC}"
            return 1
        fi
    else
        echo -e " ${YELLOW}⚠️ pytest not available - skipping test execution${NC}"
    fi

    return 0
}

#######################################
# Check TDD compliance: tests must exist, follow naming conventions,
# and import a test framework.
# Globals:   PROJECT_ROOT (read)
# Arguments: $1 - task id (unused; kept for interface symmetry)
# Outputs:   human-readable report to stdout
# Returns:   0 on pass, 1 if no tests exist
#######################################
check_tdd_compliance() {
    local task_id="$1"

    echo -e "${BLUE}🔄 TDD Compliance Check:${NC}"

    # Check if tests exist before implementation
    local test_files_count
    test_files_count=$(find "$PROJECT_ROOT/tests" -name "*.py" | wc -l)

    if [[ $test_files_count -eq 0 ]]; then
        echo -e " ${RED}❌ No test files found - violates TDD principle${NC}"
        echo -e " ${YELLOW} TDD requires tests to be written before implementation${NC}"
        return 1
    fi

    # Check test file naming conventions
    local test_naming_issues=0
    local file filename
    while IFS= read -r -d '' file; do
        filename=$(basename "$file")
        if [[ ! "$filename" =~ ^test_.*\.py$ ]] && [[ ! "$filename" =~ ^.*_test\.py$ ]]; then
            echo -e " ${YELLOW}⚠️ Test file naming: $filename (should start with 'test_' or end with '_test.py')${NC}"
            test_naming_issues=$((test_naming_issues + 1))
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)

    if [[ $test_naming_issues -eq 0 ]]; then
        echo -e " ${GREEN}✅ Test file naming conventions followed${NC}"
    fi

    # Check for test imports and structure
    local test_structure_issues=0
    while IFS= read -r -d '' file; do
        if ! grep -q "import.*test\|from.*test\|pytest\|unittest" "$file" 2>/dev/null; then
            echo -e " ${YELLOW}⚠️ Test file structure: $file (missing test framework imports)${NC}"
            test_structure_issues=$((test_structure_issues + 1))
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)

    if [[ $test_structure_issues -eq 0 ]]; then
        echo -e " ${GREEN}✅ Test file structure looks good${NC}"
    fi

    echo -e " ${GREEN}✅ TDD compliance check passed${NC}"
    return 0
}

#######################################
# Print test improvement recommendations based on simple heuristics
# over the files in tests/.
# Globals:   PROJECT_ROOT (read)
# Arguments: $1 - task id (unused; kept for interface symmetry)
# Outputs:   recommendations to stdout
#######################################
suggest_test_improvements() {
    local task_id="$1"

    echo -e "${BLUE}💡 Test Improvement Recommendations:${NC}"

    # Collect test files
    local test_files=()
    local file
    while IFS= read -r -d '' file; do
        if is_test_file "$file"; then
            test_files+=("$file")
        fi
    done < <(find "$PROJECT_ROOT/tests" -name "*.py" -print0)

    if [[ ${#test_files[@]} -eq 0 ]]; then
        echo -e " ${YELLOW}• Create test files for all source modules${NC}"
        echo -e " ${YELLOW}• Start with basic functionality tests${NC}"
        echo -e " ${YELLOW}• Use pytest for modern Python testing${NC}"
        return
    fi

    # Analyze test patterns
    local test_file test_functions test_classes
    for test_file in "${test_files[@]}"; do
        # NOTE: grep -c already prints "0" on no match (while exiting 1);
        # the previous "|| echo 0" fallback produced "0\n0" and broke the
        # numeric comparisons below. Fall back only when grep printed nothing.
        test_functions=$(grep -c "^def test_" "$test_file" 2>/dev/null) || true
        test_classes=$(grep -c "^class Test" "$test_file" 2>/dev/null) || true
        test_functions=${test_functions:-0}
        test_classes=${test_classes:-0}

        if [[ $test_functions -eq 0 ]] && [[ $test_classes -eq 0 ]]; then
            echo -e " ${YELLOW}• Add test functions to $test_file${NC}"
        fi

        # Check for edge case coverage
        if ! grep -q "test.*error\|test.*exception\|test.*edge\|test.*boundary" "$test_file" 2>/dev/null; then
            echo -e " ${YELLOW}• Consider adding edge case tests to $test_file${NC}"
        fi
    done

    # General recommendations
    echo -e " ${YELLOW}• Ensure tests cover both success and failure scenarios${NC}"
    echo -e " ${YELLOW}• Use descriptive test names that explain the scenario${NC}"
    echo -e " ${YELLOW}• Mock external dependencies to isolate unit tests${NC}"
    echo -e " ${YELLOW}• Aim for high test coverage (80%+)${NC}"
}

#######################################
# Entry point: run all checks, print a summary, and exit with the
# combined status.
# Globals:   TASK_ID (read)
# Returns:   exits 0 when all checks pass, 1 otherwise
#######################################
main() {
    local exit_code=0
    local tdd_compliance=0
    local test_coverage=0

    echo -e "${BLUE}🧪 Test Validation for Task $TASK_ID${NC}"
    echo -e "${BLUE}Focus: TDD compliance, test coverage, and execution${NC}"
    echo ""

    # Check TDD compliance
    if ! check_tdd_compliance "$TASK_ID"; then
        exit_code=1
        tdd_compliance=1
    fi

    echo ""

    # Analyze test coverage
    if ! analyze_task_test_coverage "$TASK_ID"; then
        exit_code=1
        test_coverage=1
    fi

    echo ""

    # Provide improvement recommendations
    suggest_test_improvements "$TASK_ID"

    echo ""

    # Summary
    echo -e "${BLUE}📋 Test Validation Summary:${NC}"
    if [[ $tdd_compliance -eq 0 ]]; then
        echo -e " ${GREEN}✅ TDD compliance: PASSED${NC}"
    else
        echo -e " ${RED}❌ TDD compliance: FAILED${NC}"
    fi

    if [[ $test_coverage -eq 0 ]]; then
        echo -e " ${GREEN}✅ Test coverage: PASSED${NC}"
    else
        echo -e " ${RED}❌ Test coverage: FAILED${NC}"
    fi

    if [[ $exit_code -eq 0 ]]; then
        echo ""
        echo -e "${GREEN}🎉 All test validation checks passed!${NC}"
        echo -e "${GREEN}✅ Task $TASK_ID meets test quality standards${NC}"
    else
        echo ""
        echo -e "${RED}🚫 Test validation failed - must be resolved before task completion${NC}"
        echo -e "${YELLOW} Fix all test issues before proceeding${NC}"
    fi

    echo ""
    echo -e "${BLUE}🔧 Quick Fix Commands:${NC}"
    echo -e " ${BLUE}• Run tests:${NC} uv run pytest"
    echo -e " ${BLUE}• Run with coverage:${NC} uv run pytest --cov=src"
    echo -e " ${BLUE}• Re-validate:${NC} $0 $TASK_ID"

    exit $exit_code
}

# Run main function
main "$@"