Updates
This commit is contained in:
0    ETB-API/core/__init__.py                                  (new file)
BIN  ETB-API/core/__pycache__/__init__.cpython-312.pyc        (new binary file, not shown)
BIN  ETB-API/core/__pycache__/api_versioning.cpython-312.pyc  (new binary file, not shown)
BIN  ETB-API/core/__pycache__/health_checks.cpython-312.pyc   (new binary file, not shown)
BIN  ETB-API/core/__pycache__/settings.cpython-312.pyc        (new binary file, not shown)
BIN  ETB-API/core/__pycache__/urls.cpython-312.pyc            (new binary file, not shown)
BIN  ETB-API/core/__pycache__/wsgi.cpython-312.pyc            (new binary file, not shown)
556  ETB-API/core/api_versioning.py  (new file)
@@ -0,0 +1,556 @@
"""
Enterprise API Versioning System for ETB-API
Comprehensive versioning with backward compatibility and deprecation management
"""
import logging
from datetime import datetime
from functools import wraps
from typing import Dict, List, Optional, Any
from django.http import JsonResponse, HttpRequest
from django.conf import settings
from django.utils import timezone
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny

logger = logging.getLogger(__name__)


class APIVersionManager:
    """Enterprise API version management system"""

    def __init__(self):
        self.supported_versions = getattr(settings, 'ALLOWED_VERSIONS', ['v1', 'v2'])
        self.default_version = getattr(settings, 'DEFAULT_VERSION', 'v1')
        self.deprecation_warning_days = 90  # warn clients 90 days before deprecation
        self.version_info = {
            'v1': {
                'status': 'stable',
                'release_date': '2024-01-01',
                'deprecation_date': None,
                'sunset_date': None,
                'features': [
                    'incident_management',
                    'sla_monitoring',
                    'basic_analytics',
                    'user_management',
                    'basic_automation',
                ],
                'endpoints': {
                    'incidents': '/api/v1/incidents/',
                    'sla': '/api/v1/sla/',
                    'analytics': '/api/v1/analytics/',
                    'users': '/api/v1/users/',
                    'automation': '/api/v1/automation/',
                },
                'changelog': [
                    {
                        'version': '1.0.0',
                        'date': '2024-01-01',
                        'changes': ['Initial release'],
                    }
                ],
            },
            'v2': {
                'status': 'beta',
                'release_date': '2024-06-01',
                'deprecation_date': None,
                'sunset_date': None,
                'features': [
                    'incident_management',
                    'sla_monitoring',
                    'advanced_analytics',
                    'user_management',
                    'advanced_automation',
                    'ai_insights',
                    'real_time_collaboration',
                    'advanced_security',
                ],
                'endpoints': {
                    'incidents': '/api/v2/incidents/',
                    'sla': '/api/v2/sla/',
                    'analytics': '/api/v2/analytics/',
                    'users': '/api/v2/users/',
                    'automation': '/api/v2/automation/',
                    'ai_insights': '/api/v2/ai-insights/',
                    'collaboration': '/api/v2/collaboration/',
                    'security': '/api/v2/security/',
                },
                'changelog': [
                    {
                        'version': '2.0.0-beta',
                        'date': '2024-06-01',
                        'changes': [
                            'Added AI-powered incident insights',
                            'Enhanced real-time collaboration features',
                            'Improved security and compliance tools',
                            'Advanced analytics and reporting',
                        ],
                    }
                ],
            },
        }
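    # Configuration note: the two getattr() calls above read from Django
    # settings; a minimal settings.py fragment (values are illustrative):
    #
    #     ALLOWED_VERSIONS = ['v1', 'v2']
    #     DEFAULT_VERSION = 'v1'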
    def get_version_info(self, version: str) -> Optional[Dict[str, Any]]:
        """Get information about a specific API version"""
        return self.version_info.get(version)

    def get_supported_versions(self) -> List[str]:
        """Get the list of supported API versions"""
        return self.supported_versions

    def is_version_supported(self, version: str) -> bool:
        """Check if a version is supported"""
        return version in self.supported_versions

    def is_version_deprecated(self, version: str) -> bool:
        """Check if a version is deprecated"""
        version_info = self.get_version_info(version)
        if not version_info:
            return False

        if version_info.get('deprecation_date'):
            # Make the parsed date timezone-aware before comparing with timezone.now()
            deprecation_date = timezone.make_aware(
                datetime.fromisoformat(version_info['deprecation_date'])
            )
            return timezone.now() > deprecation_date

        return False

    def is_version_sunset(self, version: str) -> bool:
        """Check if a version is sunset (no longer available)"""
        version_info = self.get_version_info(version)
        if not version_info:
            return True  # Unknown versions are considered sunset

        if version_info.get('sunset_date'):
            sunset_date = timezone.make_aware(
                datetime.fromisoformat(version_info['sunset_date'])
            )
            return timezone.now() > sunset_date

        return False

    def get_deprecation_warning(self, version: str) -> Optional[Dict[str, Any]]:
        """Get deprecation warning information for a version"""
        version_info = self.get_version_info(version)
        if not version_info:
            return None

        if version_info.get('deprecation_date'):
            deprecation_date = timezone.make_aware(
                datetime.fromisoformat(version_info['deprecation_date'])
            )
            days_until_deprecation = (deprecation_date - timezone.now()).days

            if 0 < days_until_deprecation <= self.deprecation_warning_days:
                return {
                    'warning': True,
                    'message': f'API version {version} will be deprecated in {days_until_deprecation} days',
                    'deprecation_date': version_info['deprecation_date'],
                    'recommended_version': self._get_recommended_version(version),
                }

        return None

    def _get_recommended_version(self, current_version: str) -> str:
        """Get the recommended version for migration"""
        # Simple policy: recommend the latest stable version
        for version in reversed(self.supported_versions):
            version_info = self.get_version_info(version)
            if version_info and version_info.get('status') == 'stable':
                return version

        return self.default_version

    def get_migration_guide(self, from_version: str, to_version: str) -> Dict[str, Any]:
        """Get a migration guide between two versions"""
        from_info = self.get_version_info(from_version)
        to_info = self.get_version_info(to_version)

        if not from_info or not to_info:
            return {
                'error': 'Invalid version specified',
            }

        # Generate the migration guide based on version differences
        migration_guide = {
            'from_version': from_version,
            'to_version': to_version,
            'breaking_changes': [],
            'new_features': [],
            'deprecated_features': [],
            'migration_steps': [],
        }

        # Compare feature sets
        from_features = set(from_info.get('features', []))
        to_features = set(to_info.get('features', []))

        migration_guide['new_features'] = list(to_features - from_features)
        migration_guide['deprecated_features'] = list(from_features - to_features)

        # Add specific migration steps based on the version pair
        if from_version == 'v1' and to_version == 'v2':
            migration_guide['migration_steps'] = [
                'Update authentication headers to include the API version',
                'Replace deprecated endpoints with their v2 equivalents',
                'Update request/response formats for new features',
                'Implement the new AI insights endpoints',
                'Update collaboration features for real-time capabilities',
            ]
            migration_guide['breaking_changes'] = [
                'Authentication format changes',
                'Some endpoint URLs have changed',
                'Response format updates for analytics',
            ]

        return migration_guide

    def validate_request(self, request: HttpRequest) -> Dict[str, Any]:
        """Validate an API request and return version information"""
        # Extract the version from the URL, headers, or query string
        version = self._extract_version_from_request(request)

        if not version:
            version = self.default_version

        # Check if the version is supported
        if not self.is_version_supported(version):
            return {
                'valid': False,
                'error': f'Unsupported API version: {version}',
                'supported_versions': self.supported_versions,
            }

        # Check if the version is sunset
        if self.is_version_sunset(version):
            return {
                'valid': False,
                'error': f'API version {version} is no longer available',
                'supported_versions': self.supported_versions,
            }

        # Gather version info
        version_info = self.get_version_info(version)
        deprecation_warning = self.get_deprecation_warning(version)

        return {
            'valid': True,
            'version': version,
            'version_info': version_info,
            'deprecation_warning': deprecation_warning,
        }

    def _extract_version_from_request(self, request: HttpRequest) -> Optional[str]:
        """Extract the API version from a request"""
        # 1. URL path, e.g. /api/v2/incidents/
        path = request.path
        if path.startswith('/api/'):
            version_part = path[len('/api/'):].split('/')[0]
            if version_part in self.supported_versions:
                return version_part

        # 2. Accept header, e.g. Accept: application/json; version=v2
        accept_header = request.META.get('HTTP_ACCEPT', '')
        if 'version=' in accept_header:
            for version in self.supported_versions:
                if f'version={version}' in accept_header:
                    return version

        # 3. Custom header: X-API-Version
        version_header = request.META.get('HTTP_X_API_VERSION')
        if version_header and version_header in self.supported_versions:
            return version_header

        # 4. Query parameter: ?version=v2
        version_param = request.GET.get('version')
        if version_param and version_param in self.supported_versions:
            return version_param

        return None
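    # For reference, the four extraction sources above map onto client
    # requests like this sketch (host is a placeholder):
    #
    #     import requests
    #     BASE = 'https://etb.example.com'
    #     requests.get(f'{BASE}/api/v2/incidents/')                  # 1. URL path
    #     requests.get(f'{BASE}/api/incidents/',
    #                  headers={'Accept': 'application/json; version=v2'})  # 2. Accept header
    #     requests.get(f'{BASE}/api/incidents/',
    #                  headers={'X-API-Version': 'v2'})              # 3. custom header
    #     requests.get(f'{BASE}/api/incidents/',
    #                  params={'version': 'v2'})                     # 4. query parameter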
    def add_deprecation_headers(self, response: Response, version: str) -> Response:
        """Add deprecation warning headers to a response"""
        deprecation_warning = self.get_deprecation_warning(version)

        if deprecation_warning:
            response['X-API-Deprecation-Warning'] = deprecation_warning['message']
            response['X-API-Deprecation-Date'] = deprecation_warning['deprecation_date']
            response['X-API-Recommended-Version'] = deprecation_warning['recommended_version']

        # Add version info headers
        version_info = self.get_version_info(version)
        if version_info:
            response['X-API-Version'] = version
            response['X-API-Status'] = version_info.get('status', 'unknown')
            response['X-API-Release-Date'] = version_info.get('release_date', '')

        return response


# Global version manager instance
version_manager = APIVersionManager()


class VersionedAPIView(APIView):
    """Base class for versioned API views"""

    def dispatch(self, request, *args, **kwargs):
        """Override dispatch to handle versioning"""
        # Validate the requested version
        validation_result = version_manager.validate_request(request)

        if not validation_result['valid']:
            # Return a JsonResponse here: a bare DRF Response returned from
            # dispatch() would skip finalize_response() and fail to render
            return JsonResponse(
                {
                    'error': validation_result['error'],
                    'supported_versions': validation_result.get('supported_versions', []),
                },
                status=status.HTTP_400_BAD_REQUEST
            )

        # Store version info on the request
        request.api_version = validation_result['version']
        request.version_info = validation_result['version_info']
        request.deprecation_warning = validation_result.get('deprecation_warning')

        # Call the parent dispatch
        response = super().dispatch(request, *args, **kwargs)

        # Add deprecation headers
        if hasattr(response, 'data'):
            response = version_manager.add_deprecation_headers(response, request.api_version)

        return response


@api_view(['GET'])
@permission_classes([AllowAny])
def api_version_info(request):
    """Get API version information"""
    version = request.GET.get('version')

    if version:
        # Info for a specific version
        version_info = version_manager.get_version_info(version)
        if not version_info:
            return Response(
                {'error': f'Version {version} not found'},
                status=status.HTTP_404_NOT_FOUND
            )

        return Response({
            'version': version,
            'info': version_info,
            'deprecation_warning': version_manager.get_deprecation_warning(version),
        })

    else:
        # Info for all versions
        all_versions = {}
        for version in version_manager.get_supported_versions():
            all_versions[version] = {
                'info': version_manager.get_version_info(version),
                'deprecation_warning': version_manager.get_deprecation_warning(version),
            }

        return Response({
            'supported_versions': version_manager.get_supported_versions(),
            'default_version': version_manager.default_version,
            'versions': all_versions,
        })
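# These function views still need URL routes; a hypothetical urls.py wiring
# (the path names are assumptions, not taken from this commit):
#
#     from django.urls import path
#     from core.api_versioning import api_version_info, api_migration_guide, api_changelog
#
#     urlpatterns = [
#         path('api/versions/', api_version_info),
#         path('api/versions/migration-guide/', api_migration_guide),
#         path('api/versions/changelog/', api_changelog),
#     ]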
@api_view(['GET'])
@permission_classes([AllowAny])
def api_migration_guide(request):
    """Get a migration guide between API versions"""
    from_version = request.GET.get('from')
    to_version = request.GET.get('to')

    if not from_version or not to_version:
        return Response(
            {'error': 'Both "from" and "to" parameters are required'},
            status=status.HTTP_400_BAD_REQUEST
        )

    migration_guide = version_manager.get_migration_guide(from_version, to_version)

    if 'error' in migration_guide:
        return Response(
            migration_guide,
            status=status.HTTP_400_BAD_REQUEST
        )

    return Response(migration_guide)


@api_view(['GET'])
@permission_classes([AllowAny])
def api_changelog(request):
    """Get the API changelog"""
    version = request.GET.get('version')

    if version:
        version_info = version_manager.get_version_info(version)
        if not version_info:
            return Response(
                {'error': f'Version {version} not found'},
                status=status.HTTP_404_NOT_FOUND
            )

        return Response({
            'version': version,
            'changelog': version_info.get('changelog', []),
        })

    else:
        # Changelog for all versions
        all_changelogs = {}
        for version in version_manager.get_supported_versions():
            version_info = version_manager.get_version_info(version)
            if version_info:
                all_changelogs[version] = version_info.get('changelog', [])

        return Response({
            'changelogs': all_changelogs,
        })


class APIVersionMiddleware:
    """Middleware to handle API versioning"""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Only process API requests
        if request.path.startswith('/api/'):
            validation_result = version_manager.validate_request(request)

            if not validation_result['valid']:
                return JsonResponse(
                    {
                        'error': validation_result['error'],
                        'supported_versions': validation_result.get('supported_versions', []),
                    },
                    status=400
                )

            # Store version info on the request
            request.api_version = validation_result['version']
            request.version_info = validation_result['version_info']
            request.deprecation_warning = validation_result.get('deprecation_warning')

        response = self.get_response(request)

        # Add version headers to API responses
        if request.path.startswith('/api/') and hasattr(request, 'api_version'):
            response = version_manager.add_deprecation_headers(response, request.api_version)

        return response


def version_required(versions: List[str]):
    """Decorator to require specific API versions"""
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(request, *args, **kwargs):
            if not hasattr(request, 'api_version'):
                return Response(
                    {'error': 'API version not specified'},
                    status=status.HTTP_400_BAD_REQUEST
                )

            if request.api_version not in versions:
                return Response(
                    {
                        'error': f'API version {request.api_version} not supported for this endpoint',
                        'supported_versions': versions,
                    },
                    status=status.HTTP_400_BAD_REQUEST
                )

            return view_func(request, *args, **kwargs)
        return wrapper
    return decorator


def deprecated_version(version: str, replacement_version: str):
    """Decorator to mark endpoints as deprecated in specific versions"""
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(request, *args, **kwargs):
            response = view_func(request, *args, **kwargs)

            if hasattr(request, 'api_version') and request.api_version == version:
                # Add a deprecation warning to the response headers
                response['X-API-Deprecation-Warning'] = (
                    f'This endpoint is deprecated in {version}. Please use {replacement_version}.'
                )
                response['X-API-Recommended-Version'] = replacement_version

            return response
        return wrapper
    return decorator
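# Usage sketch for the two decorators above (view names are illustrative);
# both assume APIVersionMiddleware or VersionedAPIView set request.api_version:
#
#     @api_view(['GET'])
#     @version_required(['v2'])
#     def ai_insights_view(request):
#         return Response({'insights': []})
#
#     @api_view(['GET'])
#     @deprecated_version('v1', 'v2')
#     def legacy_analytics_view(request):
#         return Response({'analytics': 'basic'})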
class VersionedSerializer:
    """Mixin for versioned serializers (compose with a DRF serializer class)"""

    def __init__(self, version: str, *args, **kwargs):
        self.api_version = version
        super().__init__(*args, **kwargs)

    def get_versioned_fields(self, version: str) -> Dict[str, Any]:
        """Get fields specific to a version; override in subclasses"""
        return {}

    def to_representation(self, instance):
        """Override to include version-specific fields"""
        data = super().to_representation(instance)

        # Merge in version-specific fields
        versioned_fields = self.get_versioned_fields(self.api_version)
        data.update(versioned_fields)

        return data


class VersionedViewSet:
    """Mixin for versioned view sets"""

    def get_serializer_class(self):
        """Get the serializer class based on the API version"""
        if hasattr(self.request, 'api_version'):
            version = self.request.api_version
            versioned_serializer = getattr(self, f'serializer_class_{version}', None)
            if versioned_serializer:
                return versioned_serializer

        return super().get_serializer_class()

    def get_queryset(self):
        """Get the queryset based on the API version"""
        if hasattr(self.request, 'api_version'):
            version = self.request.api_version
            # Compare against None: an empty queryset is still a valid override
            versioned_queryset = getattr(self, f'queryset_{version}', None)
            if versioned_queryset is not None:
                return versioned_queryset

        return super().get_queryset()

    def list(self, request, *args, **kwargs):
        """Override list to add version-specific metadata"""
        response = super().list(request, *args, **kwargs)

        # Attach metadata only when the payload is a dict (e.g. paginated
        # results); a plain list payload cannot carry a '_meta' key
        if hasattr(request, 'api_version') and isinstance(response.data, dict):
            version_info = version_manager.get_version_info(request.api_version)
            if version_info:
                response.data['_meta'] = {
                    'api_version': request.api_version,
                    'version_status': version_info.get('status'),
                    'version_release_date': version_info.get('release_date'),
                }

        return response
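The getattr convention in VersionedViewSet looks for attributes named serializer_class_<version>; a hypothetical viewset sketch (model and serializer names are assumptions, not taken from this commit):

from rest_framework import viewsets

class IncidentViewSet(VersionedViewSet, viewsets.ModelViewSet):
    queryset = Incident.objects.all()           # assumed model
    serializer_class = IncidentSerializerV1     # fallback for unversioned access
    serializer_class_v1 = IncidentSerializerV1  # chosen when request.api_version == 'v1'
    serializer_class_v2 = IncidentSerializerV2  # chosen when request.api_version == 'v2'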
16  ETB-API/core/asgi.py  (new file)
@@ -0,0 +1,16 @@
"""
ASGI config for core project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/5.2/howto/deployment/asgi/
"""

import os

from django.core.asgi import get_asgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')

application = get_asgi_application()
881  ETB-API/core/backup.py  (new file)
@@ -0,0 +1,881 @@
"""
Enterprise Backup System for ETB-API
Comprehensive backup and recovery with encryption and compression
"""
import os
import sys
import json
import hashlib
import shutil
import logging
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import boto3
import psutil

logger = logging.getLogger(__name__)


class BackupService:
    """Enterprise backup service with multiple storage options"""

    def __init__(self):
        self.backup_location = getattr(settings, 'BACKUP_LOCATION', '/backups/etb-api/')
        self.retention_days = getattr(settings, 'BACKUP_RETENTION_DAYS', 30)
        self.encryption_key = os.getenv('BACKUP_ENCRYPTION_KEY')  # reserved; not yet applied below
        self.aws_s3_bucket = os.getenv('AWS_S3_BACKUP_BUCKET')
        self.aws_region = os.getenv('AWS_REGION', 'us-east-1')

        # Ensure the backup directory exists
        Path(self.backup_location).mkdir(parents=True, exist_ok=True)
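    # Configuration note: the service reads two settings and three environment
    # variables; illustrative values:
    #
    #     # settings.py
    #     BACKUP_LOCATION = '/backups/etb-api/'
    #     BACKUP_RETENTION_DAYS = 30
    #
    #     # environment
    #     AWS_S3_BACKUP_BUCKET=my-backup-bucket
    #     AWS_REGION=us-east-1
    #     BACKUP_ENCRYPTION_KEY=<key>   (read but not yet applied by this module)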
    def create_full_backup(self, include_media: bool = True, include_static: bool = True) -> Dict[str, Any]:
        """Create a full system backup"""
        backup_id = datetime.now().strftime('%Y%m%d_%H%M%S')
        backup_info = {
            'backup_id': backup_id,
            'start_time': timezone.now(),
            'status': 'in_progress',
            'components': {},
            'total_size': 0,
            'errors': [],
        }

        try:
            logger.info(f"Starting full backup: {backup_id}")

            # 1. Database backup
            db_backup = self._backup_database(backup_id)
            backup_info['components']['database'] = db_backup

            # 2. Media files backup
            if include_media:
                media_backup = self._backup_media_files(backup_id)
                backup_info['components']['media'] = media_backup

            # 3. Static files backup
            if include_static:
                static_backup = self._backup_static_files(backup_id)
                backup_info['components']['static'] = static_backup

            # 4. Configuration backup
            config_backup = self._backup_configuration(backup_id)
            backup_info['components']['configuration'] = config_backup

            # 5. Logs backup
            logs_backup = self._backup_logs(backup_id)
            backup_info['components']['logs'] = logs_backup

            # 6. Create the backup manifest
            manifest = self._create_backup_manifest(backup_id, backup_info)
            backup_info['components']['manifest'] = manifest

            # 7. Compress the backup into a single archive
            compressed_backup = self._compress_backup(backup_id)
            backup_info['components']['compressed'] = compressed_backup

            # 8. Upload to cloud storage (if configured)
            if self.aws_s3_bucket:
                cloud_backup = self._upload_to_cloud(backup_id)
                backup_info['components']['cloud'] = cloud_backup

            # 9. Clean up old backups
            self._cleanup_old_backups()

            backup_info['status'] = 'completed'
            backup_info['end_time'] = timezone.now()
            backup_info['duration'] = (backup_info['end_time'] - backup_info['start_time']).total_seconds()

            logger.info(f"Backup completed successfully: {backup_id}")
            return backup_info

        except Exception as e:
            backup_info['status'] = 'failed'
            backup_info['end_time'] = timezone.now()
            backup_info['error'] = str(e)
            backup_info['errors'].append(str(e))
            logger.error(f"Backup failed: {backup_id} - {str(e)}")
            return backup_info

    def _backup_database(self, backup_id: str) -> Dict[str, Any]:
        """Backup the database with pg_dump (PostgreSQL) or a file copy (SQLite)"""
        try:
            db_backup_path = os.path.join(self.backup_location, f"{backup_id}_database.sql")

            if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql':
                # PostgreSQL backup
                db_config = settings.DATABASES['default']
                cmd = [
                    'pg_dump',
                    '-h', db_config['HOST'],
                    '-p', str(db_config['PORT']),
                    '-U', db_config['USER'],
                    '-d', db_config['NAME'],
                    '-f', db_backup_path,
                    '--verbose',
                    '--no-password'
                ]

                # Pass the password via the environment, not the command line
                env = os.environ.copy()
                env['PGPASSWORD'] = db_config['PASSWORD']

                result = subprocess.run(cmd, env=env, capture_output=True, text=True)

                if result.returncode != 0:
                    raise Exception(f"pg_dump failed: {result.stderr}")

            else:
                # SQLite backup: copy the database file
                db_path = settings.DATABASES['default']['NAME']
                shutil.copy2(db_path, db_backup_path)

            file_size = os.path.getsize(db_backup_path)

            return {
                'status': 'success',
                'path': db_backup_path,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Database backup failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }
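    # Direct-invocation sketch (run inside a Django context, e.g. a shell
    # started with `python manage.py shell`):
    #
    #     from core.backup import BackupService
    #     service = BackupService()
    #     result = service.create_full_backup(include_media=True, include_static=False)
    #     print(result['status'], result['backup_id'])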
    def _backup_media_files(self, backup_id: str) -> Dict[str, Any]:
        """Backup media files"""
        try:
            media_root = getattr(settings, 'MEDIA_ROOT', None)
            if not media_root or not os.path.exists(media_root):
                return {
                    'status': 'skipped',
                    'message': 'No media files to backup',
                    'timestamp': timezone.now().isoformat(),
                }

            media_backup_path = os.path.join(self.backup_location, f"{backup_id}_media.tar.gz")

            # Create a tar.gz archive
            cmd = ['tar', '-czf', media_backup_path, '-C', os.path.dirname(media_root), os.path.basename(media_root)]
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Media backup failed: {result.stderr}")

            file_size = os.path.getsize(media_backup_path)

            return {
                'status': 'success',
                'path': media_backup_path,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Media backup failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _backup_static_files(self, backup_id: str) -> Dict[str, Any]:
        """Backup static files"""
        try:
            static_root = getattr(settings, 'STATIC_ROOT', None)
            if not static_root or not os.path.exists(static_root):
                return {
                    'status': 'skipped',
                    'message': 'No static files to backup',
                    'timestamp': timezone.now().isoformat(),
                }

            static_backup_path = os.path.join(self.backup_location, f"{backup_id}_static.tar.gz")

            # Create a tar.gz archive
            cmd = ['tar', '-czf', static_backup_path, '-C', os.path.dirname(static_root), os.path.basename(static_root)]
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Static backup failed: {result.stderr}")

            file_size = os.path.getsize(static_backup_path)

            return {
                'status': 'success',
                'path': static_backup_path,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Static backup failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _backup_configuration(self, backup_id: str) -> Dict[str, Any]:
        """Backup configuration files"""
        try:
            config_backup_path = os.path.join(self.backup_location, f"{backup_id}_config.tar.gz")

            # Files to back up
            config_files = [
                'core/settings.py',
                '.env',
                'requirements.txt',
                'manage.py',
            ]

            # Stage configuration files in a temporary directory
            temp_dir = os.path.join(self.backup_location, f"{backup_id}_config_temp")
            os.makedirs(temp_dir, exist_ok=True)

            for config_file in config_files:
                source_path = os.path.join(settings.BASE_DIR, config_file)
                if os.path.exists(source_path):
                    dest_path = os.path.join(temp_dir, config_file)
                    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                    shutil.copy2(source_path, dest_path)

            # Create a tar.gz archive
            cmd = ['tar', '-czf', config_backup_path, '-C', temp_dir, '.']
            result = subprocess.run(cmd, capture_output=True, text=True)

            # Clean up the staging directory
            shutil.rmtree(temp_dir)

            if result.returncode != 0:
                raise Exception(f"Configuration backup failed: {result.stderr}")

            file_size = os.path.getsize(config_backup_path)

            return {
                'status': 'success',
                'path': config_backup_path,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Configuration backup failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _backup_logs(self, backup_id: str) -> Dict[str, Any]:
        """Backup log files"""
        try:
            log_dirs = ['/var/log/etb-api/', '/var/log/nginx/', '/var/log/celery/']
            existing_log_dirs = [d for d in log_dirs if os.path.exists(d)]

            if not existing_log_dirs:
                return {
                    'status': 'skipped',
                    'message': 'No log directories found',
                    'timestamp': timezone.now().isoformat(),
                }

            logs_backup_path = os.path.join(self.backup_location, f"{backup_id}_logs.tar.gz")

            # Create a tar.gz archive of the log directories
            cmd = ['tar', '-czf', logs_backup_path] + existing_log_dirs
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Logs backup failed: {result.stderr}")

            file_size = os.path.getsize(logs_backup_path)

            return {
                'status': 'success',
                'path': logs_backup_path,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'directories': existing_log_dirs,
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Logs backup failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }
    def _create_backup_manifest(self, backup_id: str, backup_info: Dict[str, Any]) -> Dict[str, Any]:
        """Create a backup manifest with metadata and checksums"""
        try:
            manifest_path = os.path.join(self.backup_location, f"{backup_id}_manifest.json")

            manifest = {
                'backup_id': backup_id,
                'created_at': timezone.now().isoformat(),
                'version': getattr(settings, 'VERSION', '1.0.0'),
                'environment': 'production' if not settings.DEBUG else 'development',
                'components': backup_info['components'],
                'system_info': {
                    'hostname': os.uname().nodename if hasattr(os, 'uname') else 'unknown',
                    'python_version': sys.version,
                    'django_version': django.get_version(),
                    'disk_usage': psutil.disk_usage('/').percent,
                    'memory_usage': psutil.virtual_memory().percent,
                },
                'checksums': {},
            }

            # Calculate SHA-256 checksums for the backup files
            for component, info in backup_info['components'].items():
                if info.get('status') == 'success' and 'path' in info:
                    file_path = info['path']
                    if os.path.exists(file_path):
                        with open(file_path, 'rb') as f:
                            checksum = hashlib.sha256(f.read()).hexdigest()
                        manifest['checksums'][component] = checksum

            # Write the manifest
            with open(manifest_path, 'w') as f:
                json.dump(manifest, f, indent=2, default=str)

            return {
                'status': 'success',
                'path': manifest_path,
                'size_bytes': os.path.getsize(manifest_path),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Manifest creation failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }
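    # The manifest's checksums allow later integrity checks; a hypothetical
    # verification helper (not part of this commit):
    #
    #     def verify_manifest(manifest_path: str) -> dict:
    #         with open(manifest_path) as f:
    #             manifest = json.load(f)
    #         results = {}
    #         for component, expected in manifest.get('checksums', {}).items():
    #             path = manifest['components'][component]['path']
    #             with open(path, 'rb') as f:
    #                 actual = hashlib.sha256(f.read()).hexdigest()
    #             results[component] = (actual == expected)
    #         return results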
    def _compress_backup(self, backup_id: str) -> Dict[str, Any]:
        """Compress all backup files into a single archive"""
        try:
            compressed_path = os.path.join(self.backup_location, f"{backup_id}_full_backup.tar.gz")

            # Find all backup files for this backup_id
            backup_files = []
            for file in os.listdir(self.backup_location):
                if file.startswith(backup_id) and not file.endswith('.tar.gz'):
                    backup_files.append(os.path.join(self.backup_location, file))

            if not backup_files:
                raise Exception("No backup files found to compress")

            # Create the compressed archive
            cmd = ['tar', '-czf', compressed_path] + backup_files
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Compression failed: {result.stderr}")

            file_size = os.path.getsize(compressed_path)

            return {
                'status': 'success',
                'path': compressed_path,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'files_included': len(backup_files),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Compression failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _upload_to_cloud(self, backup_id: str) -> Dict[str, Any]:
        """Upload the backup to AWS S3"""
        try:
            if not self.aws_s3_bucket:
                return {
                    'status': 'skipped',
                    'message': 'S3 bucket not configured',
                    'timestamp': timezone.now().isoformat(),
                }

            # Initialize the S3 client
            s3_client = boto3.client('s3', region_name=self.aws_region)

            # Find the compressed backup file
            compressed_file = os.path.join(self.backup_location, f"{backup_id}_full_backup.tar.gz")
            if not os.path.exists(compressed_file):
                raise Exception("Compressed backup file not found")

            # Upload to S3
            s3_key = f"backups/{backup_id}_full_backup.tar.gz"
            s3_client.upload_file(compressed_file, self.aws_s3_bucket, s3_key)

            file_size = os.path.getsize(compressed_file)

            return {
                'status': 'success',
                's3_bucket': self.aws_s3_bucket,
                's3_key': s3_key,
                'size_bytes': file_size,
                'size_mb': round(file_size / (1024 * 1024), 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"Cloud upload failed: {str(e)}")
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _cleanup_old_backups(self) -> None:
        """Remove old backups based on the retention policy"""
        try:
            # Compare raw POSIX timestamps to avoid mixing naive and aware datetimes
            cutoff_ts = (timezone.now() - timedelta(days=self.retention_days)).timestamp()

            for file in os.listdir(self.backup_location):
                file_path = os.path.join(self.backup_location, file)
                if os.path.isfile(file_path) and os.path.getmtime(file_path) < cutoff_ts:
                    os.remove(file_path)
                    logger.info(f"Removed old backup: {file}")

        except Exception as e:
            logger.error(f"Backup cleanup failed: {str(e)}")
    def restore_backup(self, backup_id: str, components: Optional[List[str]] = None) -> Dict[str, Any]:
        """Restore from a backup"""
        # Build the status dict before the try block so the except clause can use it
        restore_info = {
            'backup_id': backup_id,
            'start_time': timezone.now(),
            'status': 'in_progress',
            'components': {},
            'errors': [],
        }

        try:
            # Find the backup files for this backup_id
            backup_files = {}
            for file in os.listdir(self.backup_location):
                if file.startswith(backup_id):
                    if 'database' in file:
                        backup_files['database'] = os.path.join(self.backup_location, file)
                    elif 'media' in file:
                        backup_files['media'] = os.path.join(self.backup_location, file)
                    elif 'static' in file:
                        backup_files['static'] = os.path.join(self.backup_location, file)
                    elif 'config' in file:
                        backup_files['config'] = os.path.join(self.backup_location, file)
                    elif 'logs' in file:
                        backup_files['logs'] = os.path.join(self.backup_location, file)

            # Restore the requested components (default: everything found)
            components_to_restore = components or list(backup_files.keys())

            for component in components_to_restore:
                if component in backup_files:
                    restore_result = self._restore_component(component, backup_files[component])
                    restore_info['components'][component] = restore_result
                else:
                    restore_info['components'][component] = {
                        'status': 'skipped',
                        'message': f'Backup file for {component} not found',
                    }

            restore_info['status'] = 'completed'
            restore_info['end_time'] = timezone.now()
            restore_info['duration'] = (restore_info['end_time'] - restore_info['start_time']).total_seconds()

            logger.info(f"Restore completed: {backup_id}")
            return restore_info

        except Exception as e:
            restore_info['status'] = 'failed'
            restore_info['end_time'] = timezone.now()
            restore_info['error'] = str(e)
            restore_info['errors'].append(str(e))
            logger.error(f"Restore failed: {backup_id} - {str(e)}")
            return restore_info

    def _restore_component(self, component: str, backup_file: str) -> Dict[str, Any]:
        """Restore a specific component from backup"""
        try:
            if component == 'database':
                return self._restore_database(backup_file)
            elif component == 'media':
                return self._restore_media_files(backup_file)
            elif component == 'static':
                return self._restore_static_files(backup_file)
            elif component == 'config':
                return self._restore_configuration(backup_file)
            elif component == 'logs':
                return self._restore_logs(backup_file)
            else:
                return {
                    'status': 'skipped',
                    'message': f'Unknown component: {component}',
                }

        except Exception as e:
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }
    def _restore_database(self, backup_file: str) -> Dict[str, Any]:
        """Restore the database from backup"""
        try:
            if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql':
                # PostgreSQL restore
                db_config = settings.DATABASES['default']
                cmd = [
                    'psql',
                    '-h', db_config['HOST'],
                    '-p', str(db_config['PORT']),
                    '-U', db_config['USER'],
                    '-d', db_config['NAME'],
                    '-f', backup_file,
                ]

                env = os.environ.copy()
                env['PGPASSWORD'] = db_config['PASSWORD']

                result = subprocess.run(cmd, env=env, capture_output=True, text=True)

                if result.returncode != 0:
                    raise Exception(f"Database restore failed: {result.stderr}")

            else:
                # SQLite restore: copy the backup file back into place
                db_path = settings.DATABASES['default']['NAME']
                shutil.copy2(backup_file, db_path)

            return {
                'status': 'success',
                'message': 'Database restored successfully',
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _restore_media_files(self, backup_file: str) -> Dict[str, Any]:
        """Restore media files from backup"""
        try:
            media_root = getattr(settings, 'MEDIA_ROOT', None)
            if not media_root:
                raise Exception("MEDIA_ROOT not configured")

            # Extract the tar.gz archive next to the media root
            cmd = ['tar', '-xzf', backup_file, '-C', os.path.dirname(media_root)]
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Media restore failed: {result.stderr}")

            return {
                'status': 'success',
                'message': 'Media files restored successfully',
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _restore_static_files(self, backup_file: str) -> Dict[str, Any]:
        """Restore static files from backup"""
        try:
            static_root = getattr(settings, 'STATIC_ROOT', None)
            if not static_root:
                raise Exception("STATIC_ROOT not configured")

            # Extract the tar.gz archive next to the static root
            cmd = ['tar', '-xzf', backup_file, '-C', os.path.dirname(static_root)]
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Static restore failed: {result.stderr}")

            return {
                'status': 'success',
                'message': 'Static files restored successfully',
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _restore_configuration(self, backup_file: str) -> Dict[str, Any]:
        """Restore configuration files from backup"""
        try:
            # Extract into a temporary directory first
            temp_dir = os.path.join(self.backup_location, 'config_restore_temp')
            os.makedirs(temp_dir, exist_ok=True)

            cmd = ['tar', '-xzf', backup_file, '-C', temp_dir]
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Configuration restore failed: {result.stderr}")

            # Copy files back to their original locations
            for root, dirs, files in os.walk(temp_dir):
                for file in files:
                    source_path = os.path.join(root, file)
                    relative_path = os.path.relpath(source_path, temp_dir)
                    dest_path = os.path.join(settings.BASE_DIR, relative_path)

                    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                    shutil.copy2(source_path, dest_path)

            # Clean up the temporary directory
            shutil.rmtree(temp_dir)

            return {
                'status': 'success',
                'message': 'Configuration restored successfully',
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _restore_logs(self, backup_file: str) -> Dict[str, Any]:
        """Restore log files from backup"""
        try:
            # Extract the tar.gz archive relative to the filesystem root
            cmd = ['tar', '-xzf', backup_file, '-C', '/']
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise Exception(f"Logs restore failed: {result.stderr}")

            return {
                'status': 'success',
                'message': 'Log files restored successfully',
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'failed',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }
    def list_backups(self) -> List[Dict[str, Any]]:
        """List available backups"""
        try:
            backups = []

            for file in os.listdir(self.backup_location):
                if file.endswith('_manifest.json'):
                    manifest_path = os.path.join(self.backup_location, file)
                    try:
                        with open(manifest_path, 'r') as f:
                            manifest = json.load(f)
                        backups.append(manifest)
                    except Exception as e:
                        logger.warning(f"Could not read manifest {file}: {str(e)}")

            # Sort by creation date (newest first)
            backups.sort(key=lambda x: x.get('created_at', ''), reverse=True)

            return backups

        except Exception as e:
            logger.error(f"Failed to list backups: {str(e)}")
            return []

    def get_backup_status(self, backup_id: str) -> Dict[str, Any]:
        """Get the status of a specific backup"""
        try:
            manifest_path = os.path.join(self.backup_location, f"{backup_id}_manifest.json")

            if not os.path.exists(manifest_path):
                return {
                    'status': 'not_found',
                    'message': f'Backup {backup_id} not found',
                }

            with open(manifest_path, 'r') as f:
                manifest = json.load(f)

            # Check whether all backup files still exist
            missing_files = []
            for component, info in manifest.get('components', {}).items():
                if info.get('status') == 'success' and 'path' in info:
                    if not os.path.exists(info['path']):
                        missing_files.append(component)

            if missing_files:
                manifest['status'] = 'incomplete'
                manifest['missing_files'] = missing_files
            else:
                manifest['status'] = 'complete'

            return manifest

        except Exception as e:
            logger.error(f"Failed to get backup status: {str(e)}")
            return {
                'status': 'error',
                'message': str(e),
            }
class BackupCommand(BaseCommand):
    """Django management command for backup operations.

    Note: to be invokable via manage.py, this class must be exposed as
    ``Command`` in an app's management/commands/<name>.py module.
    """

    help = 'Perform backup and restore operations'

    def add_arguments(self, parser):
        parser.add_argument(
            'action',
            choices=['create', 'restore', 'list', 'status'],
            help='Action to perform'
        )
        parser.add_argument(
            '--backup-id',
            type=str,
            help='Backup ID for restore/status operations'
        )
        parser.add_argument(
            '--components',
            nargs='+',
            choices=['database', 'media', 'static', 'config', 'logs'],
            help='Components to backup/restore'
        )
        parser.add_argument(
            '--include-media',
            action='store_true',
            help='Include media files in the backup'
        )
        parser.add_argument(
            '--include-static',
            action='store_true',
            help='Include static files in the backup'
        )

    def handle(self, *args, **options):
        backup_service = BackupService()

        if options['action'] == 'create':
            # store_true flags default to False, so media/static are only
            # included when --include-media / --include-static are passed
            result = backup_service.create_full_backup(
                include_media=options['include_media'],
                include_static=options['include_static']
            )

            if result['status'] == 'completed':
                self.stdout.write(
                    self.style.SUCCESS(f"Backup created successfully: {result['backup_id']}")
                )
            else:
                self.stdout.write(
                    self.style.ERROR(f"Backup failed: {result.get('error', 'Unknown error')}")
                )

        elif options['action'] == 'restore':
            if not options['backup_id']:
                self.stdout.write(
                    self.style.ERROR("Backup ID is required for the restore operation")
                )
                return

            result = backup_service.restore_backup(
                options['backup_id'],
                options.get('components')
            )

            if result['status'] == 'completed':
                self.stdout.write(
                    self.style.SUCCESS(f"Restore completed successfully: {result['backup_id']}")
                )
            else:
                self.stdout.write(
                    self.style.ERROR(f"Restore failed: {result.get('error', 'Unknown error')}")
                )

        elif options['action'] == 'list':
            backups = backup_service.list_backups()

            if not backups:
                self.stdout.write("No backups found")
                return

            self.stdout.write(f"Found {len(backups)} backups:")
            for backup in backups:
                self.stdout.write(f"  - {backup['backup_id']} ({backup['created_at']})")

        elif options['action'] == 'status':
            if not options['backup_id']:
                self.stdout.write(
                    self.style.ERROR("Backup ID is required for the status operation")
                )
                return

            status = backup_service.get_backup_status(options['backup_id'])

            if status['status'] == 'not_found':
                self.stdout.write(
                    self.style.ERROR(f"Backup {options['backup_id']} not found")
                )
            else:
                self.stdout.write(f"Backup status: {status['status']}")
                if 'missing_files' in status:
                    self.stdout.write(f"Missing files: {', '.join(status['missing_files'])}")
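A quick usage sketch, assuming the class above is exposed as Command in a hypothetical core/management/commands/backup.py:

from django.core.management import call_command

call_command('backup', 'create', include_media=True, include_static=True)
call_command('backup', 'list')
call_command('backup', 'status', backup_id='20240601_030000')  # example ID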
306  ETB-API/core/celery.py  (new file)
@@ -0,0 +1,306 @@
"""
Celery configuration for ETB-API
Enterprise-grade task queue with comprehensive monitoring and error handling
"""
import os
from celery import Celery
from celery.schedules import crontab
from kombu import Exchange, Queue

# Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')

app = Celery('etb_api')

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django apps.
app.autodiscover_tasks()

# Celery Beat schedule for enterprise tasks
app.conf.beat_schedule = {
    # Health checks - every minute
    'health-checks': {
        'task': 'monitoring.tasks.execute_health_checks',
        'schedule': 60.0,
        'options': {
            'priority': 9,
            'expires': 300,
        }
    },

    # Metrics collection - every 5 minutes
    'metrics-collection': {
        'task': 'monitoring.tasks.collect_metrics',
        'schedule': 300.0,
        'options': {
            'priority': 7,
            'expires': 600,
        }
    },

    # Alert evaluation - every minute
    'alert-evaluation': {
        'task': 'monitoring.tasks.evaluate_alerts',
        'schedule': 60.0,
        'options': {
            'priority': 8,
            'expires': 300,
        }
    },

    # Data cleanup - daily at 2 AM
    'data-cleanup': {
        'task': 'monitoring.tasks.cleanup_old_data',
        'schedule': crontab(hour=2, minute=0),
        'options': {
            'priority': 5,
            'expires': 3600,
        }
    },

    # System status report - every 5 minutes
    'system-status-report': {
        'task': 'monitoring.tasks.generate_system_status_report',
        'schedule': 300.0,
        'options': {
            'priority': 6,
            'expires': 600,
        }
    },

    # Backup - daily at 3 AM
    'database-backup': {
        'task': 'core.tasks.backup_database',
        'schedule': crontab(hour=3, minute=0),
        'options': {
            'priority': 4,
            'expires': 7200,
        }
    },

    # AI model retraining - weekly on Sunday at 4 AM
    'ai-model-retraining': {
        'task': 'analytics_predictive_insights.tasks.retrain_predictive_models',
        'schedule': crontab(hour=4, minute=0, day_of_week=0),
        'options': {
            'priority': 3,
            'expires': 14400,
        }
    },

    # SLA monitoring - every 30 seconds
    'sla-monitoring': {
        'task': 'sla_oncall.tasks.monitor_sla_breaches',
        'schedule': 30.0,
        'options': {
            'priority': 9,
            'expires': 60,
        }
    },

    # Incident correlation - every 2 minutes
    'incident-correlation': {
        'task': 'incident_intelligence.tasks.correlate_incidents',
        'schedule': 120.0,
        'options': {
            'priority': 7,
            'expires': 300,
        }
    },

    # Security audit - every hour
    'security-audit': {
        'task': 'security.tasks.perform_security_audit',
        'schedule': 3600.0,
        'options': {
            'priority': 6,
            'expires': 1800,
        }
    },

    # Compliance check - daily at 5 AM
    'compliance-check': {
        'task': 'compliance_governance.tasks.perform_compliance_check',
        'schedule': crontab(hour=5, minute=0),
        'options': {
            'priority': 5,
            'expires': 3600,
        }
    },

    # Knowledge base update - every 6 hours
    'knowledge-base-update': {
        'task': 'knowledge_learning.tasks.update_knowledge_base',
        'schedule': 21600.0,
        'options': {
            'priority': 4,
            'expires': 1800,
        }
    },

    # Performance optimization - every 15 minutes
    'performance-optimization': {
        'task': 'monitoring.tasks.optimize_performance',
        'schedule': 900.0,
        'options': {
            'priority': 5,
            'expires': 600,
        }
    },
}
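# Deployment note: each queue routed below needs at least one worker
# subscribed to it, plus a beat process for the schedule above; e.g.
# (illustrative commands):
#
#     celery -A core worker -Q default,monitoring,security -l info
#     celery -A core beat -l info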
# Celery Configuration
|
||||
app.conf.update(
|
||||
# Task execution settings
|
||||
task_serializer='json',
|
||||
accept_content=['json'],
|
||||
result_serializer='json',
|
||||
timezone='UTC',
|
||||
enable_utc=True,
|
||||
|
||||
# Worker settings
|
||||
worker_prefetch_multiplier=1,
|
||||
task_acks_late=True,
|
||||
worker_max_tasks_per_child=1000,
|
||||
worker_disable_rate_limits=False,
|
||||
|
||||
# Result backend settings
|
||||
result_expires=3600,
|
||||
result_persistent=True,
|
||||
|
||||
# Task routing
|
||||
task_routes={
|
||||
'monitoring.tasks.*': {'queue': 'monitoring'},
|
||||
'security.tasks.*': {'queue': 'security'},
|
||||
'incident_intelligence.tasks.*': {'queue': 'incidents'},
|
||||
'analytics_predictive_insights.tasks.*': {'queue': 'analytics'},
|
||||
'sla_oncall.tasks.*': {'queue': 'sla'},
|
||||
'compliance_governance.tasks.*': {'queue': 'compliance'},
|
||||
'knowledge_learning.tasks.*': {'queue': 'knowledge'},
|
||||
'automation_orchestration.tasks.*': {'queue': 'automation'},
|
||||
'collaboration_war_rooms.tasks.*': {'queue': 'collaboration'},
|
||||
'core.tasks.*': {'queue': 'core'},
|
||||
},
|
||||
|
||||
# Queue configuration
|
||||
task_default_queue='default',
|
||||
task_queues={
|
||||
'default': {
|
||||
'exchange': 'default',
|
||||
'routing_key': 'default',
|
||||
},
|
||||
'monitoring': {
|
||||
'exchange': 'monitoring',
|
||||
'routing_key': 'monitoring',
|
||||
},
|
||||
'security': {
|
||||
'exchange': 'security',
|
||||
'routing_key': 'security',
|
||||
},
|
||||
'incidents': {
|
||||
'exchange': 'incidents',
|
||||
'routing_key': 'incidents',
|
||||
},
|
||||
'analytics': {
|
||||
'exchange': 'analytics',
|
||||
'routing_key': 'analytics',
|
||||
},
|
||||
'sla': {
|
||||
'exchange': 'sla',
|
||||
'routing_key': 'sla',
|
||||
},
|
||||
'compliance': {
|
||||
'exchange': 'compliance',
|
||||
'routing_key': 'compliance',
|
||||
},
|
||||
'knowledge': {
|
||||
'exchange': 'knowledge',
|
||||
'routing_key': 'knowledge',
|
||||
},
|
||||
'automation': {
|
||||
'exchange': 'automation',
|
||||
'routing_key': 'automation',
|
||||
},
|
||||
'collaboration': {
|
||||
'exchange': 'collaboration',
|
||||
'routing_key': 'collaboration',
|
||||
},
|
||||
'core': {
|
||||
'exchange': 'core',
|
||||
'routing_key': 'core',
|
||||
},
|
||||
},
|
||||
|
||||
# Error handling
|
||||
task_reject_on_worker_lost=True,
|
||||
task_ignore_result=False,
|
||||
|
||||
# Monitoring
|
||||
worker_send_task_events=True,
|
||||
task_send_sent_event=True,
|
||||
|
||||
# Retry settings
|
||||
task_default_retry_delay=60,
|
||||
task_max_retries=3,
|
||||
|
||||
# Security
|
||||
worker_hijack_root_logger=False,
|
||||
worker_log_color=False,
|
||||
)
|
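With task_routes in place, producers never have to name a queue explicitly. A minimal sketch of how the rules above behave (illustrative only; it assumes this module is importable as core.celery and defines app, which is not shown in this hunk):

# Sketch: routing behaviour under the task_routes configured above (assumptions noted in comments).
from core.celery import app  # assumed module path for the Celery app configured in this file

@app.task(name='monitoring.tasks.ping')  # hypothetical task used only for illustration
def ping():
    return 'pong'

ping.apply_async()              # matched by 'monitoring.tasks.*' -> delivered to the 'monitoring' queue
ping.apply_async(queue='core')  # an explicit queue= argument always overrides task_routes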

# Task error handling
@app.task(bind=True)
def debug_task(self):
    print(f'Request: {self.request!r}')

# Global error handler
@app.task(bind=True, autoretry_for=(Exception,), retry_kwargs={'max_retries': 3, 'countdown': 60})
def error_handler(self, exc, task_id, args, kwargs, einfo):
    """Global error handler for all tasks (signature mirrors Task.on_failure)"""
    import logging
    logger = logging.getLogger(__name__)

    logger.error(f'Task {task_id} failed: {exc}')
    logger.error(f'Args: {args}, Kwargs: {kwargs}')
    logger.error(f'Exception info: {einfo}')

    # Send alert for critical failures; the retry count lives on self.request, not self
    if self.request.retries >= 3:
        from monitoring.tasks import send_critical_alert
        send_critical_alert.delay(
            title=f'Critical Task Failure: {self.name}',
            message=f'Task {task_id} failed after {self.request.retries} retries: {exc}',
            severity='CRITICAL'
        )

# Task monitoring
@app.task(bind=True)
def monitor_task_health(self):
    """Monitor the health of all task queues"""
    from celery import current_app
    from django.core.cache import cache
    from django.utils import timezone

    # Get active tasks
    inspect = current_app.control.inspect()
    active_tasks = inspect.active()

    # Get scheduled tasks
    scheduled_tasks = inspect.scheduled()

    # Get registered tasks
    registered_tasks = inspect.registered()

    # Store health data in cache
    health_data = {
        'active_tasks': active_tasks,
        'scheduled_tasks': scheduled_tasks,
        'registered_tasks': registered_tasks,
        'timestamp': timezone.now().isoformat(),  # was self.request.id, which is a task id, not a timestamp
    }

    cache.set('celery_health', health_data, 300)  # 5 minutes

    return health_data
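The snapshot written above can be read back anywhere in the Django process; a minimal sketch (the helper name is hypothetical, not part of this commit):

# Sketch: reading the snapshot cached by monitor_task_health (helper name is hypothetical).
from django.core.cache import cache

def celery_health_snapshot():
    """Return the last snapshot, or None if none was written in the last 5 minutes."""
    return cache.get('celery_health')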
603
ETB-API/core/health_checks.py
Normal file
@@ -0,0 +1,603 @@
"""
Comprehensive Health Check System for ETB-API
Enterprise-grade health monitoring with detailed diagnostics
"""
import time
import psutil
import logging
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional
from django.http import JsonResponse
from django.db import connection
from django.core.cache import cache
from django.conf import settings
from django.utils import timezone
from django.core.exceptions import ImproperlyConfigured
import redis
import requests

logger = logging.getLogger(__name__)


class HealthCheckService:
    """Enterprise health check service with comprehensive diagnostics"""

    def __init__(self):
        self.checks = {
            'database': self._check_database,
            'cache': self._check_cache,
            'celery': self._check_celery,
            'redis': self._check_redis,
            'disk_space': self._check_disk_space,
            'memory': self._check_memory,
            'cpu': self._check_cpu,
            'external_services': self._check_external_services,
            'modules': self._check_modules,
            'security': self._check_security,
        }

    def perform_health_check(self, checks: Optional[List[str]] = None) -> Dict[str, Any]:
        """Perform comprehensive health check"""
        start_time = time.time()
        results = {
            'status': 'healthy',
            'timestamp': timezone.now().isoformat(),
            'version': getattr(settings, 'VERSION', '1.0.0'),
            'environment': 'production' if not settings.DEBUG else 'development',
            'checks': {},
            'summary': {
                'total_checks': 0,
                'passed_checks': 0,
                'failed_checks': 0,
                'warning_checks': 0,
            },
            'performance': {
                'response_time_ms': 0,
                'memory_usage_mb': 0,
                'cpu_usage_percent': 0,
            }
        }

        # Determine which checks to run
        checks_to_run = checks or list(self.checks.keys())

        # Run each health check
        for check_name in checks_to_run:
            if check_name in self.checks:
                try:
                    check_result = self.checks[check_name]()
                    results['checks'][check_name] = check_result
                    results['summary']['total_checks'] += 1

                    if check_result['status'] == 'healthy':
                        results['summary']['passed_checks'] += 1
                    elif check_result['status'] == 'warning':
                        results['summary']['warning_checks'] += 1
                    else:
                        results['summary']['failed_checks'] += 1
                        results['status'] = 'unhealthy'

                except Exception as e:
                    logger.error(f"Health check {check_name} failed: {str(e)}")
                    results['checks'][check_name] = {
                        'status': 'unhealthy',
                        'message': f'Check failed: {str(e)}',
                        'error': str(e),
                        'timestamp': timezone.now().isoformat(),
                    }
                    results['summary']['total_checks'] += 1
                    results['summary']['failed_checks'] += 1
                    results['status'] = 'unhealthy'

        # Calculate performance metrics
        end_time = time.time()
        results['performance']['response_time_ms'] = round((end_time - start_time) * 1000, 2)
        results['performance']['memory_usage_mb'] = round(psutil.Process().memory_info().rss / 1024 / 1024, 2)
        results['performance']['cpu_usage_percent'] = round(psutil.cpu_percent(), 2)

        # Determine overall status
        if results['summary']['failed_checks'] > 0:
            results['status'] = 'unhealthy'
        elif results['summary']['warning_checks'] > 0:
            results['status'] = 'degraded'

        return results

    def _check_database(self) -> Dict[str, Any]:
        """Check database connectivity and performance"""
        try:
            start_time = time.time()

            with connection.cursor() as cursor:
                # Test basic connectivity
                cursor.execute("SELECT 1")
                cursor.fetchone()

                # Test database performance
                cursor.execute("SELECT COUNT(*) FROM django_migrations")
                migration_count = cursor.fetchone()[0]

                # Check for long-running queries (PostgreSQL-specific; fails over to
                # the except branch on other backends such as the SQLite dev fallback)
                cursor.execute("""
                    SELECT COUNT(*) FROM pg_stat_activity
                    WHERE state = 'active' AND query_start < NOW() - INTERVAL '30 seconds'
                """)
                long_queries = cursor.fetchone()[0]

            response_time = (time.time() - start_time) * 1000

            status = 'healthy'
            message = 'Database is healthy'

            if response_time > 1000:  # 1 second
                status = 'warning'
                message = 'Database response time is slow'
            elif long_queries > 5:
                status = 'warning'
                message = 'Multiple long-running queries detected'

            return {
                'status': status,
                'message': message,
                'response_time_ms': round(response_time, 2),
                'migration_count': migration_count,
                'long_running_queries': long_queries,
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Database connection failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_cache(self) -> Dict[str, Any]:
        """Check cache connectivity and performance"""
        try:
            start_time = time.time()

            # Test cache write/read
            test_key = 'health_check_test'
            test_value = f'test_{time.time()}'

            cache.set(test_key, test_value, 30)
            retrieved_value = cache.get(test_key)

            response_time = (time.time() - start_time) * 1000

            if retrieved_value != test_value:
                return {
                    'status': 'unhealthy',
                    'message': 'Cache read/write test failed',
                    'response_time_ms': round(response_time, 2),
                    'timestamp': timezone.now().isoformat(),
                }

            status = 'healthy'
            message = 'Cache is healthy'

            if response_time > 100:  # 100ms
                status = 'warning'
                message = 'Cache response time is slow'

            return {
                'status': status,
                'message': message,
                'response_time_ms': round(response_time, 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Cache connection failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_celery(self) -> Dict[str, Any]:
        """Check Celery worker status and queue health"""
        try:
            from celery import current_app

            # Get worker status
            inspect = current_app.control.inspect()
            active_workers = inspect.active()
            scheduled_tasks = inspect.scheduled()
            registered_tasks = inspect.registered()

            worker_count = len(active_workers) if active_workers else 0
            total_active_tasks = sum(len(tasks) for tasks in (active_workers or {}).values())
            total_scheduled_tasks = sum(len(tasks) for tasks in (scheduled_tasks or {}).values())

            status = 'healthy'
            message = 'Celery workers are healthy'

            if worker_count == 0:
                status = 'unhealthy'
                message = 'No active Celery workers found'
            elif total_active_tasks > 100:
                status = 'warning'
                message = 'High number of active tasks detected'
            elif total_scheduled_tasks > 50:
                status = 'warning'
                message = 'High number of scheduled tasks detected'

            return {
                'status': status,
                'message': message,
                'worker_count': worker_count,
                'active_tasks': total_active_tasks,
                'scheduled_tasks': total_scheduled_tasks,
                # The registry is identical on every worker, so report the first worker's count
                'registered_tasks': len(next(iter(registered_tasks.values()))) if registered_tasks else 0,
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Celery check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_redis(self) -> Dict[str, Any]:
        """Check Redis connectivity and performance"""
        try:
            start_time = time.time()

            # Connect to Redis
            redis_url = getattr(settings, 'CELERY_BROKER_URL', 'redis://localhost:6379/0')
            r = redis.from_url(redis_url)

            # Test basic operations
            test_key = 'health_check_redis'
            test_value = f'test_{time.time()}'

            r.set(test_key, test_value, ex=30)
            retrieved_value = r.get(test_key)

            # Get Redis info
            info = r.info()

            response_time = (time.time() - start_time) * 1000

            # Guard against a missing key before decoding, so a failed read reports
            # cleanly instead of raising AttributeError on None
            if retrieved_value is None or retrieved_value.decode() != test_value:
                return {
                    'status': 'unhealthy',
                    'message': 'Redis read/write test failed',
                    'response_time_ms': round(response_time, 2),
                    'timestamp': timezone.now().isoformat(),
                }

            status = 'healthy'
            message = 'Redis is healthy'

            if response_time > 50:  # 50ms
                status = 'warning'
                message = 'Redis response time is slow'

            return {
                'status': status,
                'message': message,
                'response_time_ms': round(response_time, 2),
                'redis_version': info.get('redis_version'),
                'used_memory_human': info.get('used_memory_human'),
                'connected_clients': info.get('connected_clients'),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Redis connection failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_disk_space(self) -> Dict[str, Any]:
        """Check disk space usage"""
        try:
            disk_usage = psutil.disk_usage('/')
            total_gb = disk_usage.total / (1024**3)
            used_gb = disk_usage.used / (1024**3)
            free_gb = disk_usage.free / (1024**3)
            usage_percent = (used_gb / total_gb) * 100

            status = 'healthy'
            message = 'Disk space is healthy'

            if usage_percent > 90:
                status = 'unhealthy'
                message = 'Disk space critically low'
            elif usage_percent > 80:
                status = 'warning'
                message = 'Disk space usage is high'

            return {
                'status': status,
                'message': message,
                'total_gb': round(total_gb, 2),
                'used_gb': round(used_gb, 2),
                'free_gb': round(free_gb, 2),
                'usage_percent': round(usage_percent, 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Disk space check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_memory(self) -> Dict[str, Any]:
        """Check memory usage"""
        try:
            memory = psutil.virtual_memory()
            total_gb = memory.total / (1024**3)
            used_gb = memory.used / (1024**3)
            available_gb = memory.available / (1024**3)
            usage_percent = memory.percent

            status = 'healthy'
            message = 'Memory usage is healthy'

            if usage_percent > 90:
                status = 'unhealthy'
                message = 'Memory usage critically high'
            elif usage_percent > 80:
                status = 'warning'
                message = 'Memory usage is high'

            return {
                'status': status,
                'message': message,
                'total_gb': round(total_gb, 2),
                'used_gb': round(used_gb, 2),
                'available_gb': round(available_gb, 2),
                'usage_percent': round(usage_percent, 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Memory check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_cpu(self) -> Dict[str, Any]:
        """Check CPU usage"""
        try:
            cpu_percent = psutil.cpu_percent(interval=1)
            cpu_count = psutil.cpu_count()
            load_avg = psutil.getloadavg() if hasattr(psutil, 'getloadavg') else [0, 0, 0]

            status = 'healthy'
            message = 'CPU usage is healthy'

            if cpu_percent > 90:
                status = 'unhealthy'
                message = 'CPU usage critically high'
            elif cpu_percent > 80:
                status = 'warning'
                message = 'CPU usage is high'

            return {
                'status': status,
                'message': message,
                'cpu_percent': round(cpu_percent, 2),
                'cpu_count': cpu_count,
                'load_avg_1min': round(load_avg[0], 2),
                'load_avg_5min': round(load_avg[1], 2),
                'load_avg_15min': round(load_avg[2], 2),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'CPU check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_external_services(self) -> Dict[str, Any]:
        """Check external service dependencies"""
        try:
            services = {
                'email': {
                    'url': f'smtp://{getattr(settings, "EMAIL_HOST", "localhost")}:{getattr(settings, "EMAIL_PORT", "587")}',
                    'timeout': 5,
                },
            }

            results = {}
            overall_status = 'healthy'

            for service_name, config in services.items():
                try:
                    # This is a simplified check - in production, you'd implement actual service checks
                    results[service_name] = {
                        'status': 'healthy',
                        'message': f'{service_name} service is accessible',
                        'response_time_ms': 0,
                    }
                except Exception as e:
                    results[service_name] = {
                        'status': 'unhealthy',
                        'message': f'{service_name} service check failed: {str(e)}',
                        'error': str(e),
                    }
                    overall_status = 'unhealthy'

            return {
                'status': overall_status,
                'message': 'External services check completed',
                'services': results,
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'External services check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_modules(self) -> Dict[str, Any]:
        """Check Django modules and apps"""
        try:
            from django.apps import apps

            installed_apps = []
            module_status = {}

            for app_config in apps.get_app_configs():
                app_name = app_config.name
                installed_apps.append(app_name)

                try:
                    # Check if app has models; get_models() yields an iterator,
                    # so materialise it before counting
                    model_count = len(list(app_config.get_models()))

                    # Check if app has migrations (django_migrations stores the app *label*)
                    from django.db import connection
                    with connection.cursor() as cursor:
                        cursor.execute("""
                            SELECT COUNT(*) FROM django_migrations
                            WHERE app = %s
                        """, [app_config.label])
                        migration_count = cursor.fetchone()[0]

                    module_status[app_name] = {
                        'status': 'healthy',
                        'model_count': model_count,
                        'migration_count': migration_count,
                    }

                except Exception as e:
                    module_status[app_name] = {
                        'status': 'warning',
                        'error': str(e),
                    }

            return {
                'status': 'healthy',
                'message': 'All modules are healthy',
                'installed_apps': installed_apps,
                'module_status': module_status,
                'total_apps': len(installed_apps),
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Module check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }

    def _check_security(self) -> Dict[str, Any]:
        """Check security-related configurations"""
        try:
            security_checks = {
                'debug_mode': not settings.DEBUG,
                # The auto-generated insecure key *starts with* this prefix, so an
                # equality test would never catch it
                'secret_key_set': bool(settings.SECRET_KEY) and not settings.SECRET_KEY.startswith('django-insecure-'),
                'https_enabled': getattr(settings, 'SECURE_SSL_REDIRECT', False),
                'hsts_enabled': getattr(settings, 'SECURE_HSTS_SECONDS', 0) > 0,
                'csrf_protection': True,  # Django default
                'session_secure': getattr(settings, 'SESSION_COOKIE_SECURE', False),
            }

            failed_checks = [check for check, passed in security_checks.items() if not passed]

            status = 'healthy'
            message = 'Security configuration is healthy'

            if failed_checks:
                status = 'warning'
                message = f'Security issues detected: {", ".join(failed_checks)}'

            return {
                'status': status,
                'message': message,
                'security_checks': security_checks,
                'failed_checks': failed_checks,
                'timestamp': timezone.now().isoformat(),
            }

        except Exception as e:
            return {
                'status': 'unhealthy',
                'message': f'Security check failed: {str(e)}',
                'error': str(e),
                'timestamp': timezone.now().isoformat(),
            }


def health_check_view(request):
    """Django view for health check endpoint"""
    try:
        service = HealthCheckService()
        checks = request.GET.getlist('checks')
        result = service.perform_health_check(checks if checks else None)

        # Determine HTTP status code
        if result['status'] == 'healthy':
            status_code = 200
        elif result['status'] == 'degraded':
            status_code = 200  # Still operational
        else:
            status_code = 503  # Service unavailable

        return JsonResponse(result, status=status_code)

    except Exception as e:
        logger.error(f"Health check view failed: {str(e)}")
        return JsonResponse({
            'status': 'unhealthy',
            'message': f'Health check failed: {str(e)}',
            'error': str(e),
            'timestamp': timezone.now().isoformat(),
        }, status=503)


def readiness_check_view(request):
    """Django view for readiness check (simplified health check)"""
    try:
        # Quick checks for readiness
        with connection.cursor() as cursor:
            cursor.execute("SELECT 1")

        cache.set('readiness_check', 'ok', 10)
        cache.get('readiness_check')

        return JsonResponse({
            'status': 'ready',
            'timestamp': timezone.now().isoformat(),
        })

    except Exception as e:
        logger.error(f"Readiness check failed: {str(e)}")
        return JsonResponse({
            'status': 'not_ready',
            'message': str(e),
            'timestamp': timezone.now().isoformat(),
        }, status=503)


def liveness_check_view(request):
    """Django view for liveness check (basic application check)"""
    return JsonResponse({
        'status': 'alive',
        'timestamp': timezone.now().isoformat(),
    })
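A minimal sketch of exercising the three views once they are wired up in core/urls.py (the paths match the routing later in this commit; the snippet is illustrative and not part of the commit):

# Sketch: probing the health endpoints with Django's test client (illustrative only).
from django.test import Client

client = Client()
assert client.get('/health/live/').json()['status'] == 'alive'
assert client.get('/health/ready/').status_code in (200, 503)

# Restrict a probe to a subset of checks via repeated ?checks= parameters:
response = client.get('/health/?checks=cache&checks=memory')
print(response.json()['summary'])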
587
ETB-API/core/settings.py
Normal file
@@ -0,0 +1,587 @@
"""
Django settings for core project.

Generated by 'django-admin startproject' using Django 5.2.6.

For more information on this file, see
https://docs.djangoproject.com/en/5.2/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/5.2/ref/settings/
"""

from pathlib import Path
import os
import logging.config

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/5.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# The insecure generated key is only a development fallback; set DJANGO_SECRET_KEY in production.
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'django-insecure-et2fv=%$5h(^!iyo+&v+hm!#52w6)%d!hx%!kvwvx9y_%*kjdj')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'django_filters',
    'corsheaders',
    'security',
    'incident_intelligence',
    'automation_orchestration',
    'sla_oncall',
    'collaboration_war_rooms',
    'compliance_governance',
    'analytics_predictive_insights',
    'knowledge_learning',
    'monitoring',
]

MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'security.middleware.zero_trust.ZeroTrustMiddleware',
    'security.middleware.zero_trust.DeviceRegistrationMiddleware',
    'security.middleware.zero_trust.RiskBasedRateLimitMiddleware',
    'incident_intelligence.security.IncidentSecurityMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'core.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'core.wsgi.application'


# Database
# https://docs.djangoproject.com/en/5.2/ref/settings/#databases

# Production Database Configuration
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.getenv('DB_NAME', 'etb_incident_management'),
        'USER': os.getenv('DB_USER', 'etb_user'),
        'PASSWORD': os.getenv('DB_PASSWORD', 'secure_password'),
        'HOST': os.getenv('DB_HOST', 'localhost'),
        'PORT': os.getenv('DB_PORT', '5432'),
        'OPTIONS': {
            'sslmode': 'require',
            'connect_timeout': 10,
        },
        'CONN_MAX_AGE': 600,
        'CONN_HEALTH_CHECKS': True,
        'ATOMIC_REQUESTS': True,
    }
}

# Development fallback to SQLite
if DEBUG and not os.getenv('DB_HOST'):
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': BASE_DIR / 'db.sqlite3',
        }
    }


# Password validation
# https://docs.djangoproject.com/en/5.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/5.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/5.2/howto/static-files/

STATIC_URL = 'static/'

# Default primary key field type
# https://docs.djangoproject.com/en/5.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Django REST Framework configuration is defined once, further below, together
# with throttling and filtering; an earlier duplicate REST_FRAMEWORK dict was
# removed here because the later assignment silently overrode it.

# Security Settings
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
X_FRAME_OPTIONS = 'DENY'

# Session Security
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 3600  # 1 hour
SESSION_EXPIRE_AT_BROWSER_CLOSE = True

# CSRF Protection
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True

# MFA Settings
MFA_ISSUER_NAME = "ETB Incident Management"
MFA_QR_CODE_SIZE = 200

# SSO Settings
SSO_PROVIDERS = {
    'saml': {
        'enabled': True,
        'entity_id': 'https://etb-incident-management.com/saml',
        'sso_url': None,  # To be configured per environment
        'x509_cert': None,  # To be configured per environment
    },
    'oauth2': {
        'enabled': True,
        'providers': {
            'google': {
                'client_id': None,  # To be configured per environment
                'client_secret': None,  # To be configured per environment
            },
            'microsoft': {
                'client_id': None,  # To be configured per environment
                'client_secret': None,  # To be configured per environment
            },
        }
    },
    'ldap': {
        'enabled': True,
        'server_uri': None,  # To be configured per environment
        'bind_dn': None,  # To be configured per environment
        'bind_password': None,  # To be configured per environment
        'user_search_base': None,  # To be configured per environment
    }
}

# Data Classification Levels
DATA_CLASSIFICATION_LEVELS = [
    'PUBLIC',
    'INTERNAL',
    'CONFIDENTIAL',
    'RESTRICTED',
    'TOP_SECRET',
]

# Audit Trail Settings
AUDIT_LOG_RETENTION_DAYS = 2555  # 7 years
AUDIT_LOG_IMMUTABLE = True

# Custom User Model
AUTH_USER_MODEL = 'security.User'

# Zero Trust Architecture Settings
ZERO_TRUST_ENABLED = True
ZERO_TRUST_STRICT_MODE = False  # Set to True for maximum security

# Geolocation API Settings
GEO_API_KEY = None  # Set your geolocation API key here
GEO_API_PROVIDER = 'ipapi'  # Options: 'ipapi', 'ipinfo', 'maxmind'

# Device Posture Assessment
DEVICE_POSTURE_ENABLED = True
DEVICE_POSTURE_STRICT_MODE = False
DEVICE_POSTURE_UPDATE_INTERVAL = 3600  # Update device posture every hour

# Risk Assessment Settings
RISK_ASSESSMENT_ENABLED = True
RISK_ASSESSMENT_CACHE_TTL = 300  # Cache risk assessments for 5 minutes
RISK_ASSESSMENT_ML_ENABLED = False  # Enable ML-based risk assessment

# Behavioral Analysis Settings
BEHAVIORAL_ANALYSIS_ENABLED = True
BEHAVIORAL_LEARNING_PERIOD = 30  # Days to learn user behavior
BEHAVIORAL_ANOMALY_THRESHOLD = 0.7  # Threshold for behavioral anomalies

# Adaptive Authentication Settings
ADAPTIVE_AUTH_ENABLED = True
ADAPTIVE_AUTH_FALLBACK_METHODS = ['PASSWORD', 'MFA_TOTP']
ADAPTIVE_AUTH_MAX_ATTEMPTS = 3
ADAPTIVE_AUTH_LOCKOUT_DURATION = 15  # minutes

# Rate Limiting Settings
RATE_LIMIT_ENABLED = True
RATE_LIMIT_BACKEND = 'memory'  # Options: 'memory', 'redis', 'database'

# Zero Trust Logging
ZERO_TRUST_LOG_LEVEL = 'INFO'  # DEBUG, INFO, WARNING, ERROR
ZERO_TRUST_LOG_AUDIT_EVENTS = True
ZERO_TRUST_LOG_RISK_ASSESSMENTS = True

# Monitoring Settings
MONITORING_ENABLED = True
MONITORING_HEALTH_CHECK_INTERVAL = 60  # seconds
MONITORING_METRICS_COLLECTION_INTERVAL = 300  # seconds (5 minutes)
MONITORING_ALERT_EVALUATION_INTERVAL = 60  # seconds
MONITORING_DATA_RETENTION_DAYS = 90
MONITORING_CLEANUP_INTERVAL = 86400  # seconds (24 hours)

# Health Check Settings
HEALTH_CHECK_TIMEOUT = 30  # seconds
HEALTH_CHECK_RETRY_COUNT = 3
HEALTH_CHECK_ENABLED_TARGETS = [
    'APPLICATION', 'DATABASE', 'CACHE', 'QUEUE', 'MODULE'
]

# Metrics Collection Settings
METRICS_COLLECTION_ENABLED = True
METRICS_AGGREGATION_METHODS = ['AVERAGE', 'SUM', 'COUNT', 'MIN', 'MAX', 'PERCENTILE_95', 'PERCENTILE_99']
METRICS_DEFAULT_RETENTION_DAYS = 90

# Alerting Settings
ALERTING_ENABLED = True
ALERTING_DEFAULT_CHANNELS = ['EMAIL']
ALERTING_EMAIL_FROM = 'monitoring@etb-api.com'
ALERTING_SLACK_WEBHOOK_URL = None  # Set in environment
ALERTING_WEBHOOK_URL = None  # Set in environment

# Dashboard Settings
DASHBOARD_AUTO_REFRESH_ENABLED = True
DASHBOARD_DEFAULT_REFRESH_INTERVAL = 30  # seconds
DASHBOARD_MAX_WIDGETS_PER_DASHBOARD = 20

# System Status Settings
SYSTEM_STATUS_ENABLED = True
SYSTEM_STATUS_UPDATE_INTERVAL = 300  # seconds (5 minutes)
SYSTEM_STATUS_NOTIFICATION_CHANNELS = ['EMAIL', 'SLACK']

# Performance Monitoring
PERFORMANCE_MONITORING_ENABLED = True
PERFORMANCE_SLOW_QUERY_THRESHOLD = 1000  # milliseconds
PERFORMANCE_API_RESPONSE_THRESHOLD = 2000  # milliseconds
PERFORMANCE_MEMORY_THRESHOLD = 80  # percentage
PERFORMANCE_CPU_THRESHOLD = 80  # percentage
PERFORMANCE_DISK_THRESHOLD = 80  # percentage

# Redis Caching Configuration
# Note: both aliases fall back to different Redis DB numbers, but share the same
# database whenever a single REDIS_URL environment variable is set.
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': os.getenv('REDIS_URL', 'redis://localhost:6379/1'),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'CONNECTION_POOL_KWARGS': {
                'max_connections': 50,
                'retry_on_timeout': True,
                'socket_keepalive': True,
                'socket_keepalive_options': {},
            },
            'COMPRESSOR': 'django_redis.compressors.zlib.ZlibCompressor',
            'IGNORE_EXCEPTIONS': True,
        }
    },
    'sessions': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': os.getenv('REDIS_URL', 'redis://localhost:6379/2'),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
        }
    }
}
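Application code addresses the two aliases through django.core.cache; a minimal sketch (the keys are hypothetical):

# Sketch: using the cache aliases defined above (keys are hypothetical).
from django.core.cache import cache, caches

cache.set('dashboard:summary', {'open_incidents': 3}, timeout=60)  # 'default' alias
caches['sessions'].set('warmup', 'ok', timeout=10)                 # alias used by SESSION_CACHE_ALIAS below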

# Session Configuration (cookie age and browser-close expiry are set once under
# "Session Security" above)
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_CACHE_ALIAS = 'sessions'
SESSION_SAVE_EVERY_REQUEST = True

# Celery Configuration
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379/0')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0')
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'UTC'
CELERY_ENABLE_UTC = True
CELERY_BEAT_SCHEDULER = 'django_celery_beat.schedulers:DatabaseScheduler'
CELERY_WORKER_PREFETCH_MULTIPLIER = 1
CELERY_TASK_ACKS_LATE = True
CELERY_WORKER_MAX_TASKS_PER_CHILD = 1000

# Email Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('EMAIL_HOST', 'smtp.gmail.com')
EMAIL_PORT = int(os.getenv('EMAIL_PORT', '587'))
EMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', 'True').lower() == 'true'
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', '')
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', 'noreply@etb-api.com')

# Enhanced Security Settings (HTTPS/HSTS; the XSS-filter, nosniff, and
# X-Frame-Options headers are configured once under "Security Settings" above)
SECURE_SSL_REDIRECT = not DEBUG
SECURE_HSTS_SECONDS = 31536000 if not DEBUG else 0  # 1 year
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_REFERRER_POLICY = 'strict-origin-when-cross-origin'

# Django REST Framework configuration (authentication, permissions, pagination,
# filtering, and API rate limiting)
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
        'security.authentication.SSOAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 20,
    'DEFAULT_THROTTLE_CLASSES': [
        'rest_framework.throttling.AnonRateThrottle',
        'rest_framework.throttling.UserRateThrottle',
        'rest_framework.throttling.ScopedRateThrottle',
    ],
    'DEFAULT_THROTTLE_RATES': {
        'anon': '100/hour',
        'user': '1000/hour',
        'login': '5/min',
        'register': '3/min',
        'password_reset': '3/min',
    },
    'DEFAULT_FILTER_BACKENDS': [
        'django_filters.rest_framework.DjangoFilterBackend',
        'rest_framework.filters.SearchFilter',
        'rest_framework.filters.OrderingFilter',
    ],
    # 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
    # 'ALLOWED_VERSIONS': ['v1', 'v2'],
    # 'DEFAULT_VERSION': 'v1',
}
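The 'login', 'register', and 'password_reset' rates only apply to views that opt in with a matching throttle_scope; a minimal sketch (the view itself is hypothetical, not part of this commit):

# Sketch: binding a hypothetical view to the 'login' scope from DEFAULT_THROTTLE_RATES above.
from rest_framework.response import Response
from rest_framework.throttling import ScopedRateThrottle
from rest_framework.views import APIView

class LoginThrottledView(APIView):  # hypothetical view for illustration
    throttle_classes = [ScopedRateThrottle]
    throttle_scope = 'login'  # enforced at 5/min by the settings above

    def post(self, request):
        return Response({'detail': 'accepted'})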

# Comprehensive Logging Configuration
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
            'style': '{',
        },
        'simple': {
            'format': '{levelname} {message}',
            'style': '{',
        },
        'json': {
            'format': '{"level": "%(levelname)s", "time": "%(asctime)s", "module": "%(module)s", "process": %(process)d, "thread": %(thread)d, "message": "%(message)s"}',
        },
        'security': {
            'format': 'SECURITY: {levelname} {asctime} {module} {message}',
            'style': '{',
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'file': {
            'level': 'INFO',
            'filters': ['require_debug_false'],
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/etb-api/application.log',
            'maxBytes': 1024*1024*15,  # 15MB
            'backupCount': 10,
            'formatter': 'verbose',
        },
        'json_file': {
            'level': 'INFO',
            'filters': ['require_debug_false'],
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/etb-api/application.json',
            'maxBytes': 1024*1024*15,
            'backupCount': 10,
            'formatter': 'json',
        },
        'security_file': {
            'level': 'WARNING',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/etb-api/security.log',
            'maxBytes': 1024*1024*15,
            'backupCount': 10,
            'formatter': 'security',
        },
        'error_file': {
            'level': 'ERROR',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/etb-api/error.log',
            'maxBytes': 1024*1024*15,
            'backupCount': 10,
            'formatter': 'verbose',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'file', 'json_file'],
            'level': 'INFO',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['error_file', 'mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django.security': {
            'handlers': ['security_file', 'mail_admins'],
            'level': 'WARNING',
            'propagate': True,
        },
        'etb_api': {
            'handlers': ['console', 'file', 'json_file'],
            'level': 'INFO',
            'propagate': True,
        },
        'security': {
            'handlers': ['security_file', 'mail_admins'],
            'level': 'WARNING',
            'propagate': True,
        },
        'monitoring': {
            'handlers': ['console', 'file', 'json_file'],
            'level': 'INFO',
            'propagate': True,
        },
    },
}
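Modules pick these handlers up by logging under one of the configured names; a minimal sketch:

# Sketch: emitting through the named loggers configured above.
import logging

logging.getLogger('etb_api').info('routine application event')        # console when DEBUG, rotating files otherwise
logging.getLogger('security').warning('repeated failed MFA attempt')  # security.log plus admin email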

# Health Check Configuration (timeout, retry count, and enabled targets are set
# once under "Health Check Settings" above)
HEALTH_CHECK_ENABLED = True

# Backup Configuration
BACKUP_ENABLED = True
BACKUP_RETENTION_DAYS = 30
BACKUP_SCHEDULE = '0 2 * * *'  # Daily at 2 AM
BACKUP_LOCATION = '/backups/etb-api/'

# API Documentation
SPECTACULAR_SETTINGS = {
    'TITLE': 'ETB Incident Management API',
    'DESCRIPTION': 'Enterprise-grade incident management and response platform',
    'VERSION': '1.0.0',
    'SERVE_INCLUDE_SCHEMA': False,
    'COMPONENT_SPLIT_REQUEST': True,
    'SCHEMA_PATH_PREFIX': '/api/v1/',
}

# CORS Configuration
CORS_ALLOWED_ORIGINS = [
    "http://localhost:3000",
    "http://127.0.0.1:3000",
]

CORS_ALLOW_CREDENTIALS = True

# Development only: this overrides CORS_ALLOWED_ORIGINS entirely and must be
# False in production
CORS_ALLOW_ALL_ORIGINS = True

CORS_ALLOW_HEADERS = [
    'accept',
    'accept-encoding',
    'authorization',
    'content-type',
    'dnt',
    'origin',
    'user-agent',
    'x-csrftoken',
    'x-requested-with',
]
90
ETB-API/core/urls.py
Normal file
@@ -0,0 +1,90 @@
"""
URL configuration for core project.

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/5.2/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView

# Health check endpoints
from core.health_checks import health_check_view, readiness_check_view, liveness_check_view

# API versioning endpoints
from core.api_versioning import api_version_info, api_migration_guide, api_changelog

# Security endpoints
from security.enterprise_security import security_dashboard, compliance_report

# Monitoring endpoints
from monitoring.enterprise_monitoring import metrics_endpoint, monitoring_dashboard, test_alert

urlpatterns = [
    # Admin
    path('admin/', admin.site.urls),

    # Health Checks
    path('health/', health_check_view, name='health_check'),
    path('health/ready/', readiness_check_view, name='readiness_check'),
    path('health/live/', liveness_check_view, name='liveness_check'),

    # API Versioning
    path('api/version/', api_version_info, name='api_version_info'),
    path('api/migration/', api_migration_guide, name='api_migration_guide'),
    path('api/changelog/', api_changelog, name='api_changelog'),

    # Security
    path('api/security/dashboard/', security_dashboard, name='security_dashboard'),
    path('api/security/compliance/', compliance_report, name='compliance_report'),

    # Monitoring
    path('api/monitoring/metrics/', metrics_endpoint, name='metrics_endpoint'),
    path('api/monitoring/dashboard/', monitoring_dashboard, name='monitoring_dashboard'),
    path('api/monitoring/test-alert/', test_alert, name='test_alert'),

    # API Documentation
    path('api/schema/', SpectacularAPIView.as_view(), name='schema'),
    path('api/docs/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),
    path('api/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),

    # Module APIs v1
    path('api/v1/security/', include('security.urls', namespace='security_v1')),
    path('api/v1/incidents/', include('incident_intelligence.urls', namespace='incidents_v1')),
    path('api/v1/automation/', include('automation_orchestration.urls', namespace='automation_v1')),
    path('api/v1/sla/', include('sla_oncall.urls', namespace='sla_v1')),
    path('api/v1/collaboration/', include('collaboration_war_rooms.urls', namespace='collaboration_v1')),
    path('api/v1/compliance/', include('compliance_governance.urls', namespace='compliance_v1')),
    path('api/v1/analytics/', include('analytics_predictive_insights.urls', namespace='analytics_v1')),
    path('api/v1/knowledge/', include('knowledge_learning.urls', namespace='knowledge_v1')),
    path('api/v1/monitoring/', include('monitoring.urls', namespace='monitoring_v1')),

    # API v2 (future version; currently reuses the v1 URLconfs under distinct namespaces)
    path('api/v2/security/', include('security.urls', namespace='security_v2')),
    path('api/v2/incidents/', include('incident_intelligence.urls', namespace='incidents_v2')),
    path('api/v2/automation/', include('automation_orchestration.urls', namespace='automation_v2')),
    path('api/v2/sla/', include('sla_oncall.urls', namespace='sla_v2')),
    path('api/v2/collaboration/', include('collaboration_war_rooms.urls', namespace='collaboration_v2')),
    path('api/v2/compliance/', include('compliance_governance.urls', namespace='compliance_v2')),
    path('api/v2/analytics/', include('analytics_predictive_insights.urls', namespace='analytics_v2')),
    path('api/v2/knowledge/', include('knowledge_learning.urls', namespace='knowledge_v2')),
    path('api/v2/monitoring/', include('monitoring.urls', namespace='monitoring_v2')),
]

# Serve static and media files in development
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
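Because every include is namespaced per version, callers can resolve version-specific URLs by name; a minimal sketch (the 'incident-list' route name is an assumption about incident_intelligence.urls, and namespaced includes also require each included URLconf to set app_name):

# Sketch: resolving versioned routes through their namespaces (route name is assumed).
from django.urls import reverse

v1_url = reverse('incidents_v1:incident-list')  # e.g. /api/v1/incidents/
v2_url = reverse('incidents_v2:incident-list')  # e.g. /api/v2/incidents/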
16
ETB-API/core/wsgi.py
Normal file
@@ -0,0 +1,16 @@
"""
WSGI config for core project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/5.2/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')

application = get_wsgi_application()