#!/usr/bin/env python3
|
|
"""
|
|
Test script to validate the response format of optimized endpoints.
|
|
This tests Requirements 4.1, 4.2, and 4.3 from the task specification.
|
|
"""
|
|
|
|
import requests
|
|
import json
|
|
import sys
|
|
|
|
API_BASE = 'http://localhost:8000/api'
|
|
PROJECT_ID = 1 # Adjust based on your test data
|
|
|
|
def test_shot_endpoints():
    """Validate that the shot list endpoint embeds task data (Requirement 4.1).

    Fetches ``GET {API_BASE}/shots/?project_id={PROJECT_ID}`` and checks that
    each shot carries a 'task_status' summary field and a 'task_details' list
    whose entries include task_type, status, task_id and assigned_user_id
    (Requirement 4.3).

    Returns:
        bool: True when the response format is valid (or there is no shot
        data to inspect), False on any HTTP error or missing field.
    """
    print("=" * 60)
    print("TESTING SHOT ENDPOINTS (Requirement 4.1)")
    print("=" * 60)

    try:
        # Test shots list endpoint
        url = f'{API_BASE}/shots/?project_id={PROJECT_ID}'
        print(f"Testing: {url}")

        # Bounded timeout: a hung server should fail the test, not block forever.
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            print(f"❌ FAIL: HTTP {response.status_code}")
            return False

        data = response.json()
        if not data:
            # An empty project is not a format defect; skip the embedded checks.
            print("⚠️ INFO: No shots found in project - cannot test embedded data")
            return True

        shot = data[0]
        print(f"✅ SUCCESS: Retrieved {len(data)} shots")

        # Check for task_status field (Requirement 4.1)
        if 'task_status' not in shot:
            print("❌ FAIL: Shot response missing 'task_status' field")
            return False
        print("✅ PASS: Shot response includes 'task_status' field")

        # Check for task_details field
        if 'task_details' not in shot:
            print("❌ FAIL: Shot response missing 'task_details' field")
            return False
        print("✅ PASS: Shot response includes 'task_details' field")

        # Check task details structure (Requirement 4.3)
        if shot['task_details']:
            task_detail = shot['task_details'][0]
            required_fields = ['task_type', 'status', 'task_id', 'assigned_user_id']
            missing_fields = [field for field in required_fields if field not in task_detail]

            if missing_fields:
                print(f"❌ FAIL: Task details missing fields: {missing_fields}")
                return False
            print("✅ PASS: Task details include all required fields (task_type, status, task_id, assigned_user_id)")

            # Display sample data. Kept inside the guard so an empty
            # 'task_details' list cannot raise IndexError on [0].
            print(f"📊 Sample task_status: {json.dumps(shot['task_status'], indent=2)}")
            print(f"📊 Sample task_details: {json.dumps(shot['task_details'][0], indent=2)}")

        return True

    except Exception as e:
        # Broad catch is deliberate: any failure (connection, JSON decode,
        # KeyError) must report as a failed check rather than crash the runner.
        print(f"❌ ERROR: {e}")
        return False
|
|
|
|
def test_asset_endpoints():
    """Validate that the asset list endpoint embeds task data (Requirement 4.2).

    Fetches ``GET {API_BASE}/assets/?project_id={PROJECT_ID}`` and checks that
    each asset carries a 'task_status' summary field and a 'task_details' list
    whose entries include task_type, status, task_id and assigned_user_id
    (Requirement 4.3).

    Returns:
        bool: True when the response format is valid (or there is no asset
        data to inspect), False on any HTTP error or missing field.
    """
    print("\n" + "=" * 60)
    print("TESTING ASSET ENDPOINTS (Requirement 4.2)")
    print("=" * 60)

    try:
        # Test assets list endpoint
        url = f'{API_BASE}/assets/?project_id={PROJECT_ID}'
        print(f"Testing: {url}")

        # Bounded timeout: a hung server should fail the test, not block forever.
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            print(f"❌ FAIL: HTTP {response.status_code}")
            return False

        data = response.json()
        if not data:
            # An empty project is not a format defect; skip the embedded checks.
            print("⚠️ INFO: No assets found in project - cannot test embedded data")
            return True

        asset = data[0]
        print(f"✅ SUCCESS: Retrieved {len(data)} assets")

        # Check for task_status field (Requirement 4.2)
        if 'task_status' not in asset:
            print("❌ FAIL: Asset response missing 'task_status' field")
            return False
        print("✅ PASS: Asset response includes 'task_status' field")

        # Check for task_details field
        if 'task_details' not in asset:
            print("❌ FAIL: Asset response missing 'task_details' field")
            return False
        print("✅ PASS: Asset response includes 'task_details' field")

        # Check task details structure (Requirement 4.3)
        if asset['task_details']:
            task_detail = asset['task_details'][0]
            required_fields = ['task_type', 'status', 'task_id', 'assigned_user_id']
            missing_fields = [field for field in required_fields if field not in task_detail]

            if missing_fields:
                print(f"❌ FAIL: Task details missing fields: {missing_fields}")
                return False
            print("✅ PASS: Task details include all required fields (task_type, status, task_id, assigned_user_id)")

            # Display sample data. Kept inside the guard so an empty
            # 'task_details' list cannot raise IndexError on [0].
            print(f"📊 Sample task_status: {json.dumps(asset['task_status'], indent=2)}")
            print(f"📊 Sample task_details: {json.dumps(asset['task_details'][0], indent=2)}")

        return True

    except Exception as e:
        # Broad catch is deliberate: any failure (connection, JSON decode,
        # KeyError) must report as a failed check rather than crash the runner.
        print(f"❌ ERROR: {e}")
        return False
|
|
|
|
def test_frontend_compatibility():
    """Verify frontend components can consume the optimized data format.

    Re-implements, in Python, the task-extraction logic of the TaskBrowser
    and ShotDetailPanel frontend components against the live shot and asset
    endpoints, proving the embedded 'task_details' payload carries every
    field those components read.

    Returns:
        bool: True when extraction succeeds, False on HTTP or data errors.
    """
    print("\n" + "=" * 60)
    print("TESTING FRONTEND COMPONENT COMPATIBILITY")
    print("=" * 60)

    try:
        # Simulate TaskBrowser data fetching. Bounded timeouts: a hung
        # server should fail the test, not block it forever.
        shots_response = requests.get(f'{API_BASE}/shots/?project_id={PROJECT_ID}', timeout=10)
        assets_response = requests.get(f'{API_BASE}/assets/?project_id={PROJECT_ID}', timeout=10)

        if shots_response.status_code != 200 or assets_response.status_code != 200:
            print("❌ FAIL: Could not fetch data for frontend compatibility test")
            return False

        shots = shots_response.json()
        assets = assets_response.json()

        # Simulate TaskBrowser task extraction from shots.
        shot_tasks = []
        for shot in shots:
            for task_detail in shot.get('task_details') or []:
                shot_tasks.append({
                    'id': task_detail.get('task_id', 0),
                    'name': f"{shot['name']} - {task_detail['task_type']}",
                    'task_type': task_detail['task_type'],
                    'status': task_detail['status'],
                    'shot_id': shot['id'],
                    'shot_name': shot['name']
                })

        # Same extraction for assets.
        asset_tasks = []
        for asset in assets:
            for task_detail in asset.get('task_details') or []:
                asset_tasks.append({
                    'id': task_detail.get('task_id', 0),
                    'name': f"{asset['name']} - {task_detail['task_type']}",
                    'task_type': task_detail['task_type'],
                    'status': task_detail['status'],
                    'asset_id': asset['id'],
                    'asset_name': asset['name']
                })

        total_tasks = len(shot_tasks) + len(asset_tasks)
        print(f"✅ SUCCESS: Extracted {total_tasks} tasks ({len(shot_tasks)} from shots, {len(asset_tasks)} from assets)")

        # Simulate ShotDetailPanel transforming the first shot's embedded tasks.
        if shots and shots[0].get('task_details'):
            shot = shots[0]
            tasks = []
            for task_info in shot['task_details']:
                tasks.append({
                    'id': task_info.get('task_id', 0),
                    'task_type': task_info['task_type'],
                    'status': task_info['status'],
                    'assigned_user_id': task_info.get('assigned_user_id')
                })
            print(f"✅ SUCCESS: ShotDetailPanel can transform {len(tasks)} tasks from embedded data")

        return True

    except Exception as e:
        # Report any unexpected failure as a failed check instead of crashing.
        print(f"❌ ERROR: {e}")
        return False
|
|
|
|
def main():
    """Run every response-format validation check and print a summary.

    Returns:
        int: 0 when all checks pass, 1 otherwise (usable as exit status).
    """
    print("🚀 STARTING RESPONSE FORMAT VALIDATION TESTS")
    print(f"API Base: {API_BASE}")
    print(f"Project ID: {PROJECT_ID}")

    # Every check runs unconditionally; order only affects output readability.
    results = [
        ("Shot Endpoints", test_shot_endpoints()),
        ("Asset Endpoints", test_asset_endpoints()),
        ("Frontend Compatibility", test_frontend_compatibility()),
    ]

    # Summary banner
    print("\n" + "=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)

    passed = 0
    for label, ok in results:
        if ok:
            passed += 1
        print(f"{'✅ PASS' if ok else '❌ FAIL'}: {label}")

    total = len(results)
    print(f"\nOverall: {passed}/{total} tests passed")

    if passed == total:
        print("🎉 ALL TESTS PASSED - Response format validation successful!")
        return 0
    print("⚠️ SOME TESTS FAILED - Response format needs attention")
    return 1
|
|
|
|
# Script entry point: exit with 0 when all checks pass, 1 otherwise.
if __name__ == "__main__":
    sys.exit(main())