-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathfast-automation.sh
More file actions
executable file
·278 lines (252 loc) · 12.8 KB
/
fast-automation.sh
File metadata and controls
executable file
·278 lines (252 loc) · 12.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
#!/bin/bash
# Fast automation script - CIDX fast unit tests only.
# Runs pure unit tests that don't require external dependencies:
# - No real servers or API calls
# - No containers (Docker, Qdrant, Ollama)
# - No external APIs (VoyageAI, auth servers)
# - No special permissions (/var/lib access)
# Use server-fast-automation.sh for tests with dependencies.

set -e # Abort on the first unhandled command failure

# TELEMETRY: timestamped per-run log + duration report so consecutive runs
# never clobber each other's results.
TELEMETRY_DIR=".test-telemetry"
mkdir -p "$TELEMETRY_DIR"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
TELEMETRY_FILE="$TELEMETRY_DIR/fast-automation-${TIMESTAMP}.log"
DURATION_FILE="$TELEMETRY_DIR/test-durations-${TIMESTAMP}.txt"

# Pull in local environment overrides when present (for local testing).
# NOTE(review): .env is sourced after .env.local, so values in .env win over
# .env.local — confirm that ordering is intentional.
for dotenv in .env.local .env; do
  if [[ -f "$dotenv" ]]; then
    # shellcheck disable=SC1090
    source "$dotenv"
  fi
done
# Startup banner.
printf '%s\n' "🖥️ Starting CLI-focused fast automation pipeline..."
printf '%s\n' "==========================================="

# ANSI escape sequences for colored step output. The values are stored as
# literal '\033…' strings; echo -e / printf '%b' interpret them at print time.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color (reset attributes)
# Print a blank line followed by a blue, arrow-prefixed step banner.
# Arguments: $1 - step description
print_step() {
  printf '\n%b\n' "${BLUE}➡️ $1${NC}"
}
# Green check-mark confirmation for a completed step.
# Arguments: $1 - message
print_success() {
  printf '%b\n' "${GREEN}✅ $1${NC}"
}
# Yellow warning message for non-fatal issues.
# Arguments: $1 - message
print_warning() {
  printf '%b\n' "${YELLOW}⚠️ $1${NC}"
}
# Red cross-marked error message.
# Arguments: $1 - message
# FIX: route to stderr so failures remain visible when stdout is piped or
# redirected (e.g. into tee / CI logs); diagnostics belong on stderr.
print_error() {
  echo -e "${RED}❌ $1${NC}" >&2
}
# Guard: the script must be launched from the project root.
[[ -f "pyproject.toml" ]] || {
  print_error "Not in project root directory (pyproject.toml not found)"
  exit 1
}

# Report the interpreter version. GitHub Actions tests multiple versions;
# locally we just use whatever python3 resolves to.
print_step "Checking Python version"
PYTHON_VERSION=$(python3 --version 2>&1 | cut -d " " -f 2)
echo "Using Python $PYTHON_VERSION"
print_success "Python version checked"
# 1. Install dependencies (same as GitHub Actions).
print_step "Installing dependencies"
# Workaround for pip editable install path resolution issue:
# install from the parent directory to avoid the path-doubling bug.
PROJECT_DIR=$(pwd)
PROJECT_NAME=$(basename "$PROJECT_DIR")
cd ..
# Fallback chain — first variant that succeeds wins; only the final, plain
# install is allowed to abort the script (via set -e).
pip install -e "./$PROJECT_NAME[dev]" --break-system-packages 2>/dev/null \
  || pip install -e "./$PROJECT_NAME[dev]" --user 2>/dev/null \
  || pip install -e "./$PROJECT_NAME[dev]"
cd "$PROJECT_DIR"
print_success "Dependencies installed"
# 2. Lint CLI-related code with ruff.
print_step "Running ruff linter on CLI code"
if ! ruff check src/code_indexer/cli.py src/code_indexer/mode_* src/code_indexer/remote/ src/code_indexer/api_clients/ src/code_indexer/business_logic/ tests/unit/cli/ tests/unit/remote/ tests/unit/api_clients/; then
  print_error "CLI ruff linting failed"
  exit 1
fi
print_success "CLI ruff linting passed"

# 3. Check CLI code formatting with black (check-only; never rewrites files).
print_step "Checking CLI code formatting with black"
if ! black --check src/code_indexer/cli.py src/code_indexer/mode_* src/code_indexer/remote/ src/code_indexer/api_clients/ src/code_indexer/business_logic/ tests/unit/cli/ tests/unit/remote/ tests/unit/api_clients/; then
  print_error "CLI black formatting check failed"
  print_warning "Run 'black' on the CLI-related files to fix formatting"
  exit 1
fi
print_success "CLI black formatting check passed"

# 4. Type check CLI code with mypy (source only; test dirs are excluded).
print_step "Running mypy type checking on CLI code"
if ! mypy src/code_indexer/cli.py src/code_indexer/mode_* src/code_indexer/remote/ src/code_indexer/api_clients/ src/code_indexer/business_logic/ --ignore-missing-imports; then
  print_error "CLI MyPy type checking failed"
  exit 1
fi
print_success "CLI MyPy type checking passed"
# 5. Run FAST unit tests only (excluding external dependencies).
print_step "Running fast unit tests (no external services)"
# Literal here-doc (quoted delimiter, no expansion) replaces a stack of echoes.
cat <<'EOF'
ℹ️ Testing FAST unit test functionality including:
 • Command-line interface parsing and validation
 • Configuration and mode detection
 • Core business logic (without API calls)
 • Text processing and chunking
 • Progress reporting and display
 • Error handling and validation

⚠️ EXCLUDED: Tests requiring real servers, containers, or external APIs
EOF
# Run ONLY fast unit tests that don't require external services.
# TELEMETRY: --durations=0 (added below) captures ALL test durations.
printf '%s\n' "📊 Telemetry enabled: Results will be saved to $TELEMETRY_FILE"
printf '%s\n' "⏱️ Duration report: $DURATION_FILE"
# Execute the fast suite; stdout+stderr are tee'd into the telemetry log so
# output is shown live AND captured for the duration report.
# NOTE: under plain 'set -e' (no pipefail) this pipeline never aborts the
# script — tee's status (0) is the pipeline's status — so the real pytest
# result is checked explicitly afterwards.
python3 -m pytest \
  tests/unit/ \
  tests/mcpb/ \
  --durations=0 \
  --ignore=tests/unit/server/ \
  --ignore=tests/unit/infrastructure/ \
  --ignore=tests/mcpb/integration/test_bridge_e2e_real.py \
  --ignore=tests/unit/api_clients/test_base_cidx_remote_api_client_real.py \
  --ignore=tests/unit/api_clients/test_remote_query_client_real.py \
  --ignore=tests/unit/api_clients/test_business_logic_integration_real.py \
  --ignore=tests/unit/api_clients/test_repository_linking_client_real.py \
  --ignore=tests/unit/api_clients/test_jwt_token_manager_real.py \
  --ignore=tests/unit/api_clients/test_real_api_integration_required.py \
  --ignore=tests/unit/api_clients/test_messi_rule2_compliance.py \
  --ignore=tests/unit/api_clients/test_admin_api_client.py \
  --ignore=tests/unit/api_clients/test_admin_client_golden_repos_maintenance.py \
  --ignore=tests/unit/api_clients/test_jobs_cancel_status_real_integration.py \
  --ignore=tests/unit/api_clients/test_base_cidx_remote_api_client.py \
  --ignore=tests/unit/api_clients/test_jobs_api_client_tdd.py \
  --ignore=tests/unit/api_clients/test_isolation_utils.py \
  --ignore=tests/unit/api_clients/test_jobs_api_client_cancel_tdd.py \
  --ignore=tests/unit/api_clients/test_remote_query_client.py \
  --ignore=tests/unit/api_clients/test_repos_client_tdd.py \
  --ignore=tests/unit/cli/test_admin_commands.py \
  --ignore=tests/unit/cli/test_explicit_authentication_commands.py \
  --ignore=tests/unit/cli/test_jobs_cli_e2e_tdd.py \
  --ignore=tests/unit/cli/test_password_security_validation.py \
  --ignore=tests/unit/cli/test_server_lifecycle_commands.py \
  --ignore=tests/unit/cli/test_sync_command_structure.py \
  --ignore=tests/unit/cli/test_cli_init_segment_size.py \
  --ignore=tests/unit/cli/test_cli_issues_tdd_fix.py \
  --ignore=tests/unit/cli/test_cli_response_parsing_errors.py \
  --ignore=tests/unit/cli/test_cli_error_propagation_fixes.py \
  --ignore=tests/unit/cli/test_jobs_cancel_status_command_tdd.py \
  --ignore=tests/unit/cli/test_jobs_command_tdd.py \
  --ignore=tests/unit/cli/test_repos_commands_tdd.py \
  --ignore=tests/unit/cli/test_repository_activation_lifecycle.py \
  --ignore=tests/unit/cli/test_repository_branch_switching.py \
  --ignore=tests/unit/cli/test_repository_info_command.py \
  --ignore=tests/unit/cli/test_resource_cleanup_verification.py \
  --ignore=tests/unit/cli/test_authentication_status_management.py \
  --ignore=tests/unit/cli/test_admin_repos_integration_validation.py \
  --ignore=tests/unit/cli/test_daemon_delegation.py \
  --ignore=tests/unit/cli/test_query_fts_flags.py \
  --ignore=tests/unit/cli/test_staleness_display_integration.py \
  --ignore=tests/unit/cli/test_start_stop_backend_integration.py \
  --ignore=tests/unit/cli/test_cli_clear_temporal_progress.py \
  --ignore=tests/unit/cli/test_cli_fast_path.py \
  --ignore=tests/unit/cli/test_cli_temporal_display_comprehensive.py \
  --ignore=tests/unit/cli/test_cli_temporal_display_story2_1.py \
  --ignore=tests/unit/cli/test_improved_remote_query_experience.py \
  --ignore=tests/unit/cli/test_path_pattern_performance.py \
  --ignore=tests/unit/cli/test_status_temporal_performance.py \
  --ignore=tests/unit/cli/test_index_commits_clear_bug.py \
  --ignore=tests/unit/storage/test_filesystem_git_batch_limits.py \
  --ignore=tests/unit/storage/test_hnsw_incremental_batch.py \
  --ignore=tests/unit/remote/test_timeout_management.py \
  --ignore=tests/unit/performance/test_exclusion_filter_performance.py \
  --ignore=tests/unit/integration/ \
  --ignore=tests/unit/documentation/test_fixed_size_chunking_documentation.py \
  --ignore=tests/unit/cli/test_status_temporal_storage_size_bug.py \
  --ignore=tests/unit/services/test_tantivy_language_filter.py \
  --ignore=tests/unit/cli/test_index_delegation_progress.py \
  --ignore=tests/unit/cli/test_cli_option_conflict_fix.py \
  --ignore=tests/unit/test_codebase_audit_story9.py \
  --ignore=tests/unit/daemon/test_display_timing_fix.py \
  --ignore=tests/unit/services/test_clean_file_chunking_manager.py \
  --ignore=tests/unit/services/test_file_chunking_manager.py \
  --ignore=tests/unit/services/test_file_chunk_batching_optimization.py \
  --ignore=tests/unit/services/test_daemon_fts_cache_performance.py \
  --ignore=tests/unit/services/test_rpyc_daemon.py \
  --ignore=tests/unit/services/test_voyage_threadpool_elimination.py \
  --ignore=tests/unit/services/test_tantivy_regex_optimization.py \
  --ignore=tests/unit/services/test_tantivy_path_filter.py \
  --ignore=tests/unit/services/test_tantivy_limit_zero.py \
  --ignore=tests/unit/services/test_tantivy_search.py \
  --ignore=tests/unit/services/test_tantivy_regex_snippet_extraction.py \
  --ignore=tests/unit/cli/test_admin_repos_functionality_verification.py \
  --ignore=tests/unit/cli/test_admin_repos_maintenance_commands.py \
  --ignore=tests/unit/cli/test_admin_repos_add_simple.py \
  --ignore=tests/unit/cli/test_admin_repos_delete_command.py \
  --ignore=tests/unit/cli/test_admin_repos_delete_integration_e2e.py \
  --ignore=tests/unit/cli/test_password_management_commands.py \
  --ignore=tests/unit/cli/test_admin_password_change_command.py \
  --ignore=tests/unit/cli/test_repos_list_fix_verification.py \
  --ignore=tests/unit/cli/test_system_health_commands.py \
  --ignore=tests/unit/remote/test_network_error_handling.py \
  --deselect=tests/unit/cli/test_adapted_command_behavior.py::TestAdaptedStatusCommand::test_status_command_routes_to_uninitialized_mode \
  --deselect=tests/unit/proxy/test_parallel_executor.py::TestParallelCommandExecutor::test_execute_single_repository_success \
  --deselect=tests/unit/chunking/test_fixed_size_chunker.py::TestFixedSizeChunker::test_edge_case_very_large_file \
  --deselect=tests/unit/storage/test_filesystem_vector_store.py::TestProgressReporting::test_progress_callback_invoked_for_each_point \
  --deselect=tests/unit/storage/test_filesystem_vector_store.py::TestFilesystemVectorStoreCore::test_batch_upsert_performance \
  --deselect=tests/unit/storage/test_parallel_index_loading.py::TestPerformanceRequirements::test_parallel_execution_reduces_latency \
  -m "not slow and not e2e and not real_api and not integration and not requires_server and not requires_containers" \
  --cov=code_indexer \
  --cov-report=xml --cov-report=term-missing \
  2>&1 | tee "$TELEMETRY_FILE"
# BUG FIX: '$?' after a pipeline is the LAST command's status — tee's, which is
# always 0 — so pytest failures were silently reported as success. PIPESTATUS[0]
# holds pytest's real exit code; it must be read immediately after the pipeline.
PYTEST_EXIT_CODE=${PIPESTATUS[0]}
# TELEMETRY: Extract per-test duration lines ("<secs>s call|setup|teardown …")
# into a descending-sorted report. '|| true' keeps the reporting alive even if
# no duration lines exist (e.g. pytest crashed before running any tests) and
# protects against a future 'set -o pipefail'.
grep -E "^[0-9]+\.[0-9]+s (call|setup|teardown)" "$TELEMETRY_FILE" | sort -rn > "$DURATION_FILE" || true
# TELEMETRY: Summary — total wall time and count of tests slower than 5s.
TOTAL_TIME=$(grep "passed in" "$TELEMETRY_FILE" | grep -oE "[0-9]+\.[0-9]+s" | head -1)
# BUG FIX: '$1+0' forces a NUMERIC comparison. Plain '$1 > 5.0' compares
# fields like "12.34s" as strings (the trailing 's' makes them non-numeric to
# awk), so any test slower than 9.99s was NOT counted as slow.
SLOW_TESTS=$(awk '$1+0 > 5.0' "$DURATION_FILE" | wc -l)
echo ""
echo "📊 TELEMETRY: Total=$TOTAL_TIME, Slow(>5s)=$SLOW_TESTS"
echo " Log: $TELEMETRY_FILE"
echo " Durations: $DURATION_FILE"
# Maintain "latest" convenience symlinks (expansions quoted so filenames with
# spaces survive word-splitting).
ln -sf "$(basename "$TELEMETRY_FILE")" "$TELEMETRY_DIR/latest.log"
ln -sf "$(basename "$DURATION_FILE")" "$TELEMETRY_DIR/latest-durations.txt"
# Propagate the real pytest result captured above.
if [ "$PYTEST_EXIT_CODE" -eq 0 ]; then
  print_success "Fast unit tests passed"
else
  print_error "Fast unit tests failed with exit code $PYTEST_EXIT_CODE"
  exit "$PYTEST_EXIT_CODE"
fi
# Note: GitHub Actions also has version checking and publishing steps,
# but those are only relevant for actual GitHub runs.

# Summary banner; the colored headline needs expansion, the rest is a literal
# here-doc (quoted delimiter) replacing the original echo stack.
printf '\n%b\n' "${GREEN}🎉 Fast automation completed successfully!${NC}"
cat <<'EOF'
===========================================
✅ Linting passed
✅ Formatting checked
✅ Type checking passed
✅ Fast unit tests passed

🖥️ FAST test coverage (no external dependencies):
 ✅ Core CLI parsing and validation
 ✅ Configuration management and mode detection
 ✅ Business logic without API calls
 ✅ Text processing and chunking
 ✅ Error handling and validation
 ✅ Progress reporting and display logic

🚫 EXCLUDED (for speed):
 • Tests requiring real servers (test_*_real.py)
 • Tests requiring containers (infrastructure, services)
 • Tests requiring external APIs (VoyageAI, auth servers)
 • Tests requiring special permissions (/var/lib access)
 • Slow integration and e2e tests

⚡ Fast automation focuses on pure unit tests only!
ℹ️ Run 'server-fast-automation.sh' for server tests with dependencies
ℹ️ Run 'full-automation.sh' for complete integration testing
CIDX core logic validated! 🚀
EOF