From 82024016cd1777d3f39250a08115550715aa7f28 Mon Sep 17 00:00:00 2001 From: Iliyan Angelov Date: Tue, 25 Nov 2025 02:06:38 +0200 Subject: [PATCH 1/7] updates --- .gitignore | 97 ++++++ backEnd/.dockerignore | 39 --- backEnd/.gitignore | 68 ++++ backEnd/Dockerfile | 36 --- backEnd/gnx/email_backend.py | 101 ++++++ backEnd/gnx/settings.py | 49 ++- backEnd/production.env.example | 45 ++- clean-for-deploy.sh | 249 -------------- create-deployment-zip.sh | 56 ---- deploy.sh | 303 ++++++++++++++++++ docker-compose.yml | 98 ------ docker-start.sh | 240 -------------- frontEnd/.dockerignore | 26 -- frontEnd/Dockerfile | 50 --- frontEnd/app/services/[slug]/page.tsx | 6 + frontEnd/app/support-center/page.tsx | 3 +- .../components/pages/case-study/Process.tsx | 6 +- .../pages/support/KnowledgeBase.tsx | 2 +- .../shared/layout/animations/SmoothScroll.tsx | 1 - frontEnd/lib/api/serviceService.ts | 45 ++- frontEnd/lib/config/api.ts | 63 +++- frontEnd/next.config.js | 76 ++++- install-postgresql.sh | 93 ++++++ migrate-data.sh | 78 ----- migrate-sqlite-to-postgres.sh | 133 -------- nginx-gnxsoft.conf | 269 ++++++++++------ nginx-rate-limit-zones.conf | 7 + nginx.conf | 218 ------------- restart-services.sh | 16 + setup.sh | 84 ----- start-services.sh | 258 +++++++++++++++ stop-services.sh | 17 + switch-to-sqlite.sh | 34 ++ systemd/gnxsoft-backend.service | 27 ++ systemd/gnxsoft-frontend.service | 22 ++ update-keys.sh | 80 +++++ verify-deployment.sh | 283 ++++++++++++++++ 37 files changed, 1800 insertions(+), 1478 deletions(-) create mode 100644 .gitignore delete mode 100644 backEnd/.dockerignore create mode 100644 backEnd/.gitignore delete mode 100644 backEnd/Dockerfile create mode 100644 backEnd/gnx/email_backend.py delete mode 100755 clean-for-deploy.sh delete mode 100644 create-deployment-zip.sh create mode 100755 deploy.sh delete mode 100644 docker-compose.yml delete mode 100755 docker-start.sh delete mode 100644 frontEnd/.dockerignore delete mode 100644 frontEnd/Dockerfile 
create mode 100755 install-postgresql.sh delete mode 100755 migrate-data.sh delete mode 100755 migrate-sqlite-to-postgres.sh create mode 100644 nginx-rate-limit-zones.conf delete mode 100644 nginx.conf create mode 100755 restart-services.sh delete mode 100755 setup.sh create mode 100755 start-services.sh create mode 100755 stop-services.sh create mode 100644 switch-to-sqlite.sh create mode 100644 systemd/gnxsoft-backend.service create mode 100644 systemd/gnxsoft-frontend.service create mode 100755 update-keys.sh create mode 100755 verify-deployment.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..0cf3cf5d --- /dev/null +++ b/.gitignore @@ -0,0 +1,97 @@ +# Environment files +.env +.env.local +.env.production +.env.*.local +backEnd/.env +frontEnd/.env.production +frontEnd/.env.local + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +venv/ +env/ +ENV/ +.venv + +# Django +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal +backEnd/media/ +backEnd/staticfiles/ +backEnd/static/ +backEnd/logs/ + +# Node.js +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +frontEnd/.next/ +frontEnd/out/ +frontEnd/build/ + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Logs +*.log +logs/ + +# Coverage +htmlcov/ +.coverage +.coverage.* +coverage.xml +*.cover + +# Testing +.pytest_cache/ +.tox/ + +# PM2 +.pm2/ + +# SSL Certificates +*.pem +*.key +*.crt + +# Backup files +*.sql +*.backup +*.bak + +# Temporary files +*.tmp +*.temp + diff --git a/backEnd/.dockerignore b/backEnd/.dockerignore deleted file mode 100644 index deaf5286..00000000 --- a/backEnd/.dockerignore +++ /dev/null @@ -1,39 +0,0 @@ -__pycache__ -*.pyc -*.pyo -*.pyd -.Python -*.so -*.egg -*.egg-info -dist -build -.venv -venv/ -env/ -ENV/ -.env -.venv -*.log -logs/ -*.db -*.sqlite3 -db.sqlite3 -.git 
-.gitignore -README.md -*.md -.DS_Store -.vscode -.idea -*.swp -*.swo -*~ -.pytest_cache -.coverage -htmlcov/ -.tox/ -.mypy_cache/ -.dmypy.json -dmypy.json - diff --git a/backEnd/.gitignore b/backEnd/.gitignore new file mode 100644 index 00000000..dfe2f16a --- /dev/null +++ b/backEnd/.gitignore @@ -0,0 +1,68 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Virtual Environment +venv/ +env/ +ENV/ +.venv + +# Django +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal +/media +/staticfiles +/static + +# Environment variables +.env +.env.local +.env.*.local + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Logs +logs/ +*.log + +# Coverage +htmlcov/ +.coverage +.coverage.* +coverage.xml +*.cover + +# Testing +.pytest_cache/ +.tox/ + diff --git a/backEnd/Dockerfile b/backEnd/Dockerfile deleted file mode 100644 index 657a230b..00000000 --- a/backEnd/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -# Django Backend Dockerfile -FROM python:3.12-slim - -# Set environment variables -ENV PYTHONDONTWRITEBYTECODE=1 \ - PYTHONUNBUFFERED=1 \ - DEBIAN_FRONTEND=noninteractive - -# Set work directory -WORKDIR /app - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - gcc \ - postgresql-client \ - && rm -rf /var/lib/apt/lists/* - -# Install Python dependencies -COPY requirements.txt /app/ -RUN pip install --no-cache-dir -r requirements.txt - -# Copy project -COPY . 
/app/ - -# Create directories for media and static files -RUN mkdir -p /app/media /app/staticfiles /app/logs - -# Collect static files (will be done at runtime if needed) -# RUN python manage.py collectstatic --noinput - -# Expose port -EXPOSE 1086 - -# Run gunicorn -CMD ["gunicorn", "--bind", "0.0.0.0:1086", "--workers", "3", "--timeout", "120", "--access-logfile", "-", "--error-logfile", "-", "gnx.wsgi:application"] - diff --git a/backEnd/gnx/email_backend.py b/backEnd/gnx/email_backend.py new file mode 100644 index 00000000..070d64c7 --- /dev/null +++ b/backEnd/gnx/email_backend.py @@ -0,0 +1,101 @@ +""" +Custom email backend that handles localhost SSL certificate issues. +Disables SSL certificate verification for localhost connections. +""" +import ssl +from django.core.mail.backends.smtp import EmailBackend +from django.conf import settings +import logging + +logger = logging.getLogger(__name__) + + +class LocalhostSMTPBackend(EmailBackend): + """ + Custom SMTP backend that disables SSL certificate verification + for localhost connections. This is safe for localhost mail servers. + """ + + def open(self): + """ + Override to create SSL context without certificate verification + when connecting to localhost. 
+ """ + if self.use_ssl or self.use_tls: + # Check if connecting to localhost + if self.host in ['localhost', '127.0.0.1', '::1']: + # Create SSL context without certificate verification for localhost + self.connection = None + try: + import smtplib + + if self.use_ssl: + # For SSL connections + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + # SMTP_SSL uses 'context' parameter (Python 3.3+) + import sys + if sys.version_info >= (3, 3): + self.connection = smtplib.SMTP_SSL( + self.host, + self.port, + timeout=self.timeout, + context=context + ) + else: + # For older Python, use unverified context + self.connection = smtplib.SMTP_SSL( + self.host, + self.port, + timeout=self.timeout + ) + else: + # For TLS connections + self.connection = smtplib.SMTP( + self.host, + self.port, + timeout=self.timeout + ) + # Create SSL context without certificate verification + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + # Use context parameter (Python 3.4+ uses 'context', not 'ssl_context') + # For older versions, we'll need to patch the socket after starttls + import sys + if sys.version_info >= (3, 4): + # Python 3.4+ supports context parameter + self.connection.starttls(context=context) + else: + # For older Python, disable verification globally for this connection + # by monkey-patching ssl._create_default_https_context temporarily + original_context = ssl._create_default_https_context + ssl._create_default_https_context = ssl._create_unverified_context + try: + self.connection.starttls() + finally: + ssl._create_default_https_context = original_context + + if self.username and self.password: + self.connection.login(self.username, self.password) + + logger.info(f"Successfully connected to localhost mail server at {self.host}:{self.port}") + return True + + except Exception as e: + logger.error(f"Failed to connect to localhost mail server: {str(e)}") 
+ if self.connection: + try: + self.connection.quit() + except: + pass + self.connection = None + raise + else: + # For non-localhost, use standard SSL/TLS with certificate verification + return super().open() + else: + # No SSL/TLS, use standard connection + return super().open() + diff --git a/backEnd/gnx/settings.py b/backEnd/gnx/settings.py index 6dfa1e29..00db7acd 100644 --- a/backEnd/gnx/settings.py +++ b/backEnd/gnx/settings.py @@ -98,22 +98,34 @@ WSGI_APPLICATION = 'gnx.wsgi.application' # Database # https://docs.djangoproject.com/en/4.2/ref/settings/#databases -# Support both PostgreSQL (production) and SQLite (development) -DATABASE_URL = config('DATABASE_URL', default='') -if DATABASE_URL and DATABASE_URL.startswith('postgresql://'): - # PostgreSQL configuration - import dj_database_url - DATABASES = { - 'default': dj_database_url.parse(DATABASE_URL, conn_max_age=600) - } +# Force SQLite - change this to False and set USE_POSTGRESQL=True to use PostgreSQL +FORCE_SQLITE = True # Set to False to allow PostgreSQL + +if not FORCE_SQLITE: + # PostgreSQL configuration (only if FORCE_SQLITE is False) + USE_POSTGRESQL = config('USE_POSTGRESQL', default='False', cast=bool) + DATABASE_URL = config('DATABASE_URL', default='') + if USE_POSTGRESQL and DATABASE_URL and DATABASE_URL.startswith('postgresql://'): + import dj_database_url + DATABASES = { + 'default': dj_database_url.parse(DATABASE_URL, conn_max_age=600) + } + else: + # Fallback to SQLite + DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } + } else: - # SQLite configuration (development/fallback) -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': BASE_DIR / 'db.sqlite3', + # SQLite configuration (forced) + DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } } -} # Password validation @@ -355,8 +367,12 @@ if DEBUG and not USE_SMTP_IN_DEV: EMAIL_BACKEND = 
'django.core.mail.backends.console.EmailBackend' else: # Production or Dev with SMTP enabled - use SMTP backend - EMAIL_BACKEND = config('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend') EMAIL_HOST = config('EMAIL_HOST', default='mail.gnxsoft.com') + # Use custom backend for localhost to handle SSL certificate issues + if EMAIL_HOST in ['localhost', '127.0.0.1', '::1']: + EMAIL_BACKEND = 'gnx.email_backend.LocalhostSMTPBackend' + else: + EMAIL_BACKEND = config('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend') EMAIL_PORT = config('EMAIL_PORT', default=587, cast=int) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=True, cast=bool) EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool) @@ -367,7 +383,8 @@ else: EMAIL_TIMEOUT = config('EMAIL_TIMEOUT', default=30, cast=int) # Site URL for email links -SITE_URL = config('SITE_URL', default='http://localhost:3000') +# Use production URL by default if not in DEBUG mode +SITE_URL = config('SITE_URL', default='https://gnxsoft.com' if not DEBUG else 'http://localhost:3000') # Email connection settings for production reliability EMAIL_CONNECTION_TIMEOUT = config('EMAIL_CONNECTION_TIMEOUT', default=10, cast=int) diff --git a/backEnd/production.env.example b/backEnd/production.env.example index 8b693a4b..b4a91a12 100644 --- a/backEnd/production.env.example +++ b/backEnd/production.env.example @@ -1,26 +1,33 @@ -# Production Environment Configuration for GNX Contact Form -# Copy this file to .env and update with your actual values +# Production Environment Configuration for GNX-WEB +# Copy this file to .env in the backEnd directory and update with your actual values +# Backend runs on port 1086 (internal only, proxied through nginx) # Django Settings -SECRET_KEY=your-super-secret-production-key-here +SECRET_KEY=your-super-secret-production-key-here-change-this-immediately DEBUG=False -ALLOWED_HOSTS=gnxsoft.com,www.gnxsoft.com,your-server-ip 
+ALLOWED_HOSTS=gnxsoft.com,www.gnxsoft.com,your-server-ip,localhost,127.0.0.1 -# Database - Using SQLite (default) -# SQLite is configured in settings.py - no DATABASE_URL needed +# Database - PostgreSQL on host (port 5433 to avoid conflict with Docker instance on 5432) +# Format: postgresql://USER:PASSWORD@HOST:PORT/DBNAME +# Create database: sudo -u postgres psql +# CREATE DATABASE gnx_db; +# CREATE USER gnx_user WITH PASSWORD 'your_secure_password'; +# GRANT ALL PRIVILEGES ON DATABASE gnx_db TO gnx_user; +DATABASE_URL=postgresql://gnx_user:your_password_here@localhost:5433/gnx_db # Email Configuration (Production) EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend -EMAIL_HOST=smtp.gmail.com +EMAIL_HOST=mail.gnxsoft.com EMAIL_PORT=587 EMAIL_USE_TLS=True EMAIL_USE_SSL=False -EMAIL_HOST_USER=your-email@gmail.com -EMAIL_HOST_PASSWORD=your-app-password +EMAIL_HOST_USER=your-email@gnxsoft.com +EMAIL_HOST_PASSWORD=your-email-password DEFAULT_FROM_EMAIL=noreply@gnxsoft.com # Company email for contact form notifications COMPANY_EMAIL=contact@gnxsoft.com +SUPPORT_EMAIL=support@gnxsoft.com # Email timeout settings for production reliability EMAIL_TIMEOUT=30 @@ -35,6 +42,8 @@ SECURE_HSTS_PRELOAD=True SECURE_CONTENT_TYPE_NOSNIFF=True SECURE_BROWSER_XSS_FILTER=True X_FRAME_OPTIONS=DENY +SESSION_COOKIE_SECURE=True +CSRF_COOKIE_SECURE=True # CORS Settings (Production) PRODUCTION_ORIGINS=https://gnxsoft.com,https://www.gnxsoft.com @@ -47,15 +56,27 @@ CSRF_TRUSTED_ORIGINS=https://gnxsoft.com,https://www.gnxsoft.com # REQUIRED in production! Auto-generated only in DEBUG mode. 
# Generate a secure key: python -c "import secrets; print(secrets.token_urlsafe(32))" # Or get current key: python manage.py show_api_key +# This key must match the one in nginx configuration INTERNAL_API_KEY=your-secure-api-key-here-change-this-in-production # Admin IP Restriction - Only these IPs can access Django admin # Comma-separated list of IP addresses or CIDR networks (e.g., 193.194.155.249 or 192.168.1.0/24) ADMIN_ALLOWED_IPS=193.194.155.249 -# Static Files -STATIC_ROOT=/var/www/gnx/staticfiles/ -MEDIA_ROOT=/var/www/gnx/media/ +# Custom allowed IPs for IP whitelist middleware (optional, comma-separated) +CUSTOM_ALLOWED_IPS= + +# Site URL for email links and absolute URLs +SITE_URL=https://gnxsoft.com + +# Static and Media Files (relative to backEnd directory) +# These will be collected/served from these locations +STATIC_ROOT=/home/gnx/Desktop/GNX-WEB/backEnd/staticfiles +MEDIA_ROOT=/home/gnx/Desktop/GNX-WEB/backEnd/media # Logging LOG_LEVEL=INFO + +# Backend Port (internal only, nginx proxies to this) +# Backend runs on 127.0.0.1:1086 +BACKEND_PORT=1086 diff --git a/clean-for-deploy.sh b/clean-for-deploy.sh deleted file mode 100755 index 06526dd6..00000000 --- a/clean-for-deploy.sh +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/bash -# Clean script for GNX Web Application - Prepares project for deployment -# This script removes all cache files, build artifacts, and temporary files - -set -e - -echo "๐Ÿงน Cleaning GNX Web Application for deployment..." 
-echo "" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Function to safely remove directories -remove_dir() { - if [ -d "$1" ]; then - echo -e "${YELLOW}Removing: $1${NC}" - rm -rf "$1" - echo -e "${GREEN}โœ… Removed: $1${NC}" - fi -} - -# Function to safely remove files -remove_file() { - if [ -f "$1" ]; then - echo -e "${YELLOW}Removing: $1${NC}" - rm -f "$1" - echo -e "${GREEN}โœ… Removed: $1${NC}" - fi -} - -# Function to find and remove files by pattern -remove_pattern() { - find . -name "$1" -type f -not -path "./.git/*" -not -path "./node_modules/*" 2>/dev/null | while read -r file; do - echo -e "${YELLOW}Removing: $file${NC}" - rm -f "$file" - done - echo -e "${GREEN}โœ… Cleaned: $1${NC}" -} - -# Function to find and remove directories by pattern -remove_dir_pattern() { - find . -name "$1" -type d -not -path "./.git/*" -not -path "./node_modules/*" 2>/dev/null | while read -r dir; do - echo -e "${YELLOW}Removing: $dir${NC}" - rm -rf "$dir" - done - echo -e "${GREEN}โœ… Cleaned: $1${NC}" -} - -echo "๐Ÿ“ฆ Step 1: Stopping Docker containers (if running)..." -docker-compose down 2>/dev/null || true -echo "" - -echo "๐Ÿ“ฆ Step 2: Removing Docker volumes (optional - uncomment if needed)..." -# Uncomment the next line if you want to remove Docker volumes (WARNING: This deletes database data!) -# docker-compose down -v 2>/dev/null || true -echo "" - -echo "๐Ÿ“ฆ Step 3: Removing Docker build cache..." -docker system prune -f --volumes 2>/dev/null || true -echo "" - -echo "๐Ÿ Step 4: Cleaning Python artifacts..." 
- -# Remove Python cache directories -remove_dir_pattern "__pycache__" - -# Remove Python compiled files -remove_pattern "*.pyc" -remove_pattern "*.pyo" -remove_pattern "*.pyd" - -# Remove Python egg files -remove_pattern "*.egg" -remove_dir_pattern "*.egg-info" - -# Remove Python virtual environments -remove_dir "backEnd/venv" -remove_dir "frontEnd/venv" -remove_dir ".venv" -remove_dir "venv" -remove_dir "env" -remove_dir "ENV" - -# Remove Python build directories -remove_dir "backEnd/build" -remove_dir "backEnd/dist" -remove_dir "frontEnd/build" -remove_dir "frontEnd/dist" - -# Remove Python test artifacts -remove_dir ".pytest_cache" -remove_dir ".coverage" -remove_dir "htmlcov" -remove_dir ".tox" -remove_dir ".mypy_cache" -remove_file ".dmypy.json" -remove_file "dmypy.json" - -echo "" - -echo "๐Ÿ“ฆ Step 5: Cleaning Node.js artifacts..." - -# Remove node_modules -remove_dir "frontEnd/node_modules" - -# Remove Next.js build artifacts -remove_dir "frontEnd/.next" -remove_dir "frontEnd/out" -remove_dir "frontEnd/build" -remove_dir "frontEnd/.pnp" -remove_file "frontEnd/.pnp.js" - -# Remove TypeScript build info -remove_pattern "*.tsbuildinfo" -remove_file "frontEnd/next-env.d.ts" - -# Remove package manager files -remove_file "frontEnd/.yarn/install-state.gz" - -echo "" - -echo "๐Ÿ“ Step 6: Cleaning log files..." - -# Remove log files -remove_pattern "*.log" -remove_dir "backEnd/logs" -remove_file "frontEnd/dev.log" -remove_file "frontEnd/npm-debug.log*" -remove_file "frontEnd/yarn-debug.log*" -remove_file "frontEnd/yarn-error.log*" - -echo "" - -echo "๐Ÿ—„๏ธ Step 7: Cleaning database files..." 
- -# Remove SQLite databases (keep if you need them, but typically not for deployment) -# Uncomment if you want to remove SQLite files -# remove_file "backEnd/db.sqlite3" -# remove_pattern "*.db" -# remove_pattern "*.sqlite" -# remove_pattern "*.sqlite3" - -# Remove migration marker files -remove_file ".migrated_to_postgres" - -echo "" - -echo "๐Ÿ“ Step 8: Cleaning static files (will be regenerated on build)..." - -# Remove collected static files (they'll be regenerated) -remove_dir "backEnd/staticfiles" - -echo "" - -echo "๐Ÿ’พ Step 9: Cleaning backup files..." - -# Remove backup files -remove_pattern "*.backup" -remove_pattern "*.bak" -remove_pattern "*~" -remove_pattern "*.swp" -remove_pattern "*.swo" -remove_dir "backups" - -echo "" - -echo "๐Ÿ–ฅ๏ธ Step 10: Cleaning IDE and OS files..." - -# Remove IDE directories -remove_dir ".vscode" -remove_dir ".idea" -remove_dir "backEnd/.vscode" -remove_dir "backEnd/.idea" -remove_dir "frontEnd/.vscode" -remove_dir "frontEnd/.idea" - -# Remove OS files -remove_pattern ".DS_Store" -remove_pattern "Thumbs.db" -remove_pattern ".DS_Store?" - -echo "" - -echo "๐Ÿ” Step 11: Cleaning environment files (keeping examples)..." - -# Remove local env files (keep examples) -remove_file ".env.local" -remove_file ".env.development.local" -remove_file ".env.test.local" -remove_file ".env.production.local" -remove_file "frontEnd/.env.local" -remove_file "frontEnd/.env.development.local" -remove_file "frontEnd/.env.test.local" -remove_file "frontEnd/.env.production.local" - -# Note: We keep .env.production as it's needed for deployment -echo -e "${YELLOW}โš ๏ธ Note: .env.production is kept (needed for deployment)${NC}" - -echo "" - -echo "๐Ÿ“ฆ Step 12: Cleaning other artifacts..." 
- -# Remove coverage directories -remove_dir "coverage" -remove_dir ".nyc_output" -remove_dir "frontEnd/coverage" - -# Remove vercel directory -remove_dir "frontEnd/.vercel" - -# Remove certificate files (if any) -remove_pattern "*.pem" - -echo "" - -echo "๐Ÿงน Step 13: Final cleanup..." - -# Remove any remaining temporary files -find . -name "*.tmp" -type f -not -path "./.git/*" 2>/dev/null | while read -r file; do - remove_file "$file" -done - -# Remove empty directories (optional - be careful with this) -# find . -type d -empty -not -path "./.git/*" -not -path "./node_modules/*" -delete 2>/dev/null || true - -echo "" - -echo "โœ… Cleanup complete!" -echo "" -echo "๐Ÿ“‹ Summary:" -echo " - Python cache files removed" -echo " - Virtual environments removed" -echo " - Node.js artifacts removed" -echo " - Build artifacts removed" -echo " - Log files removed" -echo " - IDE/OS files removed" -echo "" -echo "โš ๏ธ Important notes:" -echo " - .env.production is kept (needed for deployment)" -echo " - Media files are kept (user uploads)" -echo " - Docker volumes were NOT removed (database data preserved)" -echo " - If you need a complete clean, uncomment Docker volume removal in the script" -echo "" -echo "๐Ÿš€ Project is now ready for deployment!" -echo " Run: ./docker-start.sh to start the stack" - diff --git a/create-deployment-zip.sh b/create-deployment-zip.sh deleted file mode 100644 index 255fcfc5..00000000 --- a/create-deployment-zip.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -# Script to create a production deployment zip file - -set -e - -ZIP_NAME="gnx-web-production-$(date +%Y%m%d).zip" -TEMP_DIR=$(mktemp -d) - -echo "๐Ÿ“ฆ Creating deployment package: $ZIP_NAME" -echo "" - -# Copy files to temp directory -echo "๐Ÿ“‹ Copying files..." 
-rsync -av --progress \ - --exclude='.git' \ - --exclude='node_modules' \ - --exclude='__pycache__' \ - --exclude='*.pyc' \ - --exclude='venv' \ - --exclude='env' \ - --exclude='.venv' \ - --exclude='*.log' \ - --exclude='*.sqlite3' \ - --exclude='backups' \ - --exclude='*.swp' \ - --exclude='*.swo' \ - --exclude='.DS_Store' \ - --exclude='.vscode' \ - --exclude='.idea' \ - --exclude='.next' \ - --exclude='dist' \ - --exclude='build' \ - --exclude='*.egg-info' \ - --exclude='.dockerignore' \ - --exclude='.zipignore' \ - ./ "$TEMP_DIR/gnx-web/" - -# Create zip -echo "" -echo "๐Ÿ—œ๏ธ Creating zip file..." -cd "$TEMP_DIR" -zip -r "$ZIP_NAME" gnx-web/ > /dev/null - -# Move to original directory -mv "$ZIP_NAME" "$OLDPWD/" - -# Cleanup -cd "$OLDPWD" -rm -rf "$TEMP_DIR" - -echo "โœ… Deployment package created: $ZIP_NAME" -echo "" -echo "๐Ÿ“‹ File size: $(du -h "$ZIP_NAME" | cut -f1)" -echo "" -echo "๐Ÿ“ค Ready to upload to server!" - diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 00000000..0c6804f3 --- /dev/null +++ b/deploy.sh @@ -0,0 +1,303 @@ +#!/bin/bash + +# GNX-WEB Complete Deployment Script +# This script sets up and deploys the entire application + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Get script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +BACKEND_DIR="$SCRIPT_DIR/backEnd" +FRONTEND_DIR="$SCRIPT_DIR/frontEnd" + +# Function to generate secure random key +generate_secret_key() { + python3 -c "import secrets; print(secrets.token_urlsafe($1))" 2>/dev/null || \ + openssl rand -base64 $((($1 * 3) / 4)) | tr -d '\n' | head -c $1 +} + +# Function to update .env file with generated keys +update_env_file() { + local env_file="$1" + local secret_key="$2" + local api_key="$3" + + # Update SECRET_KEY + if grep -q "^SECRET_KEY=" "$env_file"; then + sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$secret_key|" "$env_file" + else + echo 
"SECRET_KEY=$secret_key" >> "$env_file" + fi + + # Update INTERNAL_API_KEY + if grep -q "^INTERNAL_API_KEY=" "$env_file"; then + sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$api_key|" "$env_file" + else + echo "INTERNAL_API_KEY=$api_key" >> "$env_file" + fi + + # Update STATIC_ROOT and MEDIA_ROOT paths + sed -i "s|^STATIC_ROOT=.*|STATIC_ROOT=$BACKEND_DIR/staticfiles|" "$env_file" + sed -i "s|^MEDIA_ROOT=.*|MEDIA_ROOT=$BACKEND_DIR/media|" "$env_file" +} + +# Function to update nginx config with API key +update_nginx_config() { + local nginx_config="$1" + local api_key="$2" + + # Escape special characters in API key for sed + local escaped_key=$(echo "$api_key" | sed 's/[[\.*^$()+?{|]/\\&/g') + + # Update API key in both /api/ and /admin/ locations + sudo sed -i "s|set \$api_key \".*\";|set \$api_key \"$escaped_key\";|g" "$nginx_config" +} + +echo -e "${BLUE}==========================================" +echo "GNX-WEB Deployment Script" +echo "==========================================${NC}" +echo "" + +# Check if running as root for system-level operations +if [ "$EUID" -ne 0 ]; then + echo -e "${YELLOW}Note: Some operations require root privileges${NC}" + echo -e "${YELLOW}You may be prompted for sudo password${NC}" + echo "" +fi + +# Generate secure keys +echo -e "${GREEN}[0/8] Generating secure keys...${NC}" +SECRET_KEY=$(generate_secret_key 50) +INTERNAL_API_KEY=$(generate_secret_key 32) +echo -e "${GREEN}โœ“ Generated SECRET_KEY${NC}" +echo -e "${GREEN}โœ“ Generated INTERNAL_API_KEY${NC}" +echo "" + +# Step 1: Install PostgreSQL +echo -e "${GREEN}[1/8] Installing PostgreSQL...${NC}" +if [ -f "$SCRIPT_DIR/install-postgresql.sh" ]; then + sudo bash "$SCRIPT_DIR/install-postgresql.sh" +else + echo -e "${RED}Error: install-postgresql.sh not found${NC}" + exit 1 +fi + +# Step 2: Setup Backend +echo -e "${GREEN}[2/8] Setting up Backend...${NC}" +cd "$BACKEND_DIR" + +# Create virtual environment if it doesn't exist +if [ ! 
-d "venv" ]; then + echo -e "${BLUE}Creating Python virtual environment...${NC}" + python3 -m venv venv +fi + +# Activate virtual environment +source venv/bin/activate + +# Install Python dependencies +echo -e "${BLUE}Installing Python dependencies...${NC}" +pip install --upgrade pip +pip install -r requirements.txt + +# Create .env file if it doesn't exist +if [ ! -f ".env" ]; then + echo -e "${BLUE}Creating .env file from production.env.example...${NC}" + cp production.env.example .env +fi + +# Update .env file with generated keys and paths +echo -e "${BLUE}Updating .env file with generated keys...${NC}" +update_env_file ".env" "$SECRET_KEY" "$INTERNAL_API_KEY" +echo -e "${GREEN}โœ“ Updated .env file with generated keys${NC}" + +# Check if critical values still need to be updated +if grep -q "your_password_here\|your-email\|your-server-ip" .env; then + echo -e "${YELLOW}โš  Some values in .env still need to be updated:${NC}" + echo -e "${YELLOW} - DATABASE_URL (database password)${NC}" + echo -e "${YELLOW} - Email settings${NC}" + echo -e "${YELLOW} - ALLOWED_HOSTS (server IP/domain)${NC}" + echo -e "${YELLOW} - ADMIN_ALLOWED_IPS${NC}" + echo "" + echo -e "${YELLOW}Press Enter to continue (you can update these later)...${NC}" + read +fi + +# Create necessary directories +mkdir -p logs media staticfiles + +# Step 3: Setup Database +echo -e "${GREEN}[3/8] Setting up Database...${NC}" +echo -e "${YELLOW}Make sure PostgreSQL is running and database is created${NC}" +echo -e "${YELLOW}Run these commands if needed:${NC}" +echo " sudo -u postgres psql" +echo " CREATE DATABASE gnx_db;" +echo " CREATE USER gnx_user WITH PASSWORD 'your_password';" +echo " GRANT ALL PRIVILEGES ON DATABASE gnx_db TO gnx_user;" +echo "" +echo -e "${YELLOW}Press Enter to continue after database is ready...${NC}" +read + +# Run migrations +echo -e "${BLUE}Running database migrations...${NC}" +python manage.py migrate --noinput + +# Collect static files +echo -e "${BLUE}Collecting static 
files...${NC}" +python manage.py collectstatic --noinput + +# Step 4: Setup Frontend +echo -e "${GREEN}[4/8] Setting up Frontend...${NC}" +cd "$FRONTEND_DIR" + +# Install Node.js dependencies +if [ ! -d "node_modules" ]; then + echo -e "${BLUE}Installing Node.js dependencies...${NC}" + npm install +fi + +# Create .env.production if it doesn't exist +if [ ! -f ".env.production" ]; then + echo -e "${BLUE}Creating .env.production file...${NC}" + cat > .env.production << EOF +NEXT_PUBLIC_SITE_URL=https://gnxsoft.com +NEXT_PUBLIC_API_URL= +PORT=1087 +NODE_ENV=production +NEXT_TELEMETRY_DISABLED=1 +EOF + echo -e "${GREEN}โœ“ Created .env.production${NC}" +else + # Update PORT if it exists but is different + if ! grep -q "^PORT=1087" .env.production; then + echo -e "${BLUE}Updating PORT in .env.production...${NC}" + if grep -q "^PORT=" .env.production; then + sed -i "s|^PORT=.*|PORT=1087|" .env.production + else + echo "PORT=1087" >> .env.production + fi + echo -e "${GREEN}โœ“ Updated PORT in .env.production${NC}" + fi + + # Ensure NODE_ENV is set to production + if ! grep -q "^NODE_ENV=production" .env.production; then + if grep -q "^NODE_ENV=" .env.production; then + sed -i "s|^NODE_ENV=.*|NODE_ENV=production|" .env.production + else + echo "NODE_ENV=production" >> .env.production + fi + fi +fi + +# Build frontend +echo -e "${BLUE}Building frontend for production...${NC}" +NODE_ENV=production PORT=1087 npm run build + +# Step 5: Install PM2 +echo -e "${GREEN}[5/8] Installing PM2...${NC}" +if ! 
command -v pm2 &> /dev/null; then + echo -e "${BLUE}Installing PM2 globally...${NC}" + sudo npm install -g pm2 + pm2 startup systemd -u $USER --hp $HOME + echo -e "${YELLOW}Please run the command shown above to enable PM2 on boot${NC}" +else + echo -e "${GREEN}PM2 is already installed${NC}" +fi + +# Step 6: Configure Firewall +echo -e "${GREEN}[6/8] Configuring Firewall...${NC}" +if command -v ufw &> /dev/null; then + echo -e "${BLUE}Configuring UFW firewall...${NC}" + sudo ufw allow 80/tcp comment 'HTTP' + sudo ufw allow 443/tcp comment 'HTTPS' + sudo ufw deny 1086/tcp comment 'Backend - Internal Only' + sudo ufw deny 1087/tcp comment 'Frontend - Internal Only' + sudo ufw deny 5433/tcp comment 'PostgreSQL - Internal Only' + echo -e "${YELLOW}Firewall rules configured. Enable with: sudo ufw enable${NC}" +else + echo -e "${YELLOW}UFW not found. Please configure firewall manually${NC}" +fi + +# Step 7: Setup Nginx +echo -e "${GREEN}[7/8] Setting up Nginx...${NC}" +if command -v nginx &> /dev/null; then + echo -e "${BLUE}Copying nginx configuration...${NC}" + sudo cp "$SCRIPT_DIR/nginx-gnxsoft.conf" /etc/nginx/sites-available/gnxsoft + + # Update paths in nginx config + sudo sed -i "s|/home/gnx/Desktop/GNX-WEB|$SCRIPT_DIR|g" /etc/nginx/sites-available/gnxsoft + + # Update INTERNAL_API_KEY in nginx config + echo -e "${BLUE}Updating nginx configuration with INTERNAL_API_KEY...${NC}" + update_nginx_config "/etc/nginx/sites-available/gnxsoft" "$INTERNAL_API_KEY" + echo -e "${GREEN}โœ“ Updated nginx config with INTERNAL_API_KEY${NC}" + + # Enable site + if [ ! 
-L /etc/nginx/sites-enabled/gnxsoft ]; then + sudo ln -s /etc/nginx/sites-available/gnxsoft /etc/nginx/sites-enabled/ + fi + + # Remove default nginx site if it exists + if [ -L /etc/nginx/sites-enabled/default ]; then + sudo rm /etc/nginx/sites-enabled/default + fi + + # Test nginx configuration + echo -e "${BLUE}Testing nginx configuration...${NC}" + if sudo nginx -t; then + echo -e "${GREEN}โœ“ Nginx configuration is valid${NC}" + else + echo -e "${RED}โœ— Nginx configuration has errors${NC}" + echo -e "${YELLOW}Please check the configuration manually${NC}" + fi + + echo -e "${YELLOW}Nginx configured. Reload with: sudo systemctl reload nginx${NC}" +else + echo -e "${RED}Nginx not found. Please install nginx first${NC}" +fi + +# Step 8: Start Services +echo -e "${GREEN}[8/8] Starting Services...${NC}" +if [ -f "$SCRIPT_DIR/start-services.sh" ]; then + bash "$SCRIPT_DIR/start-services.sh" +else + echo -e "${RED}Error: start-services.sh not found${NC}" + exit 1 +fi + +echo "" +echo -e "${GREEN}==========================================" +echo "Deployment Complete!" +echo "==========================================${NC}" +echo "" +echo -e "${BLUE}Generated Keys (saved to backEnd/.env and nginx config):${NC}" +echo -e "${GREEN}โœ“ SECRET_KEY: ${SECRET_KEY:0:20}...${NC}" +echo -e "${GREEN}โœ“ INTERNAL_API_KEY: ${INTERNAL_API_KEY:0:20}...${NC}" +echo "" +echo -e "${BLUE}Next Steps:${NC}" +echo "1. Update backEnd/.env with remaining configuration:" +echo " - DATABASE_URL (database credentials)" +echo " - Email settings (SMTP configuration)" +echo " - ALLOWED_HOSTS (your domain and server IP)" +echo " - ADMIN_ALLOWED_IPS (your admin IP address)" +echo "2. Create PostgreSQL database and user (if not done)" +echo "3. Run: sudo systemctl reload nginx" +echo "4. Run: sudo ufw enable (to enable firewall)" +echo "5. Check services: pm2 status" +echo "6. 
View logs: pm2 logs" +echo "" +echo -e "${BLUE}Service URLs:${NC}" +echo " Backend: http://127.0.0.1:1086" +echo " Frontend: http://127.0.0.1:1087" +echo " Public: https://gnxsoft.com (via nginx)" +echo "" +echo -e "${GREEN}Note: Keys have been automatically generated and configured!${NC}" +echo "" + diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 7a41bb9b..00000000 --- a/docker-compose.yml +++ /dev/null @@ -1,98 +0,0 @@ -version: '3.8' - -services: - postgres: - image: postgres:16-alpine - container_name: gnx-postgres - restart: unless-stopped - environment: - - POSTGRES_DB=${POSTGRES_DB:-gnxdb} - - POSTGRES_USER=${POSTGRES_USER:-gnx} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-change-this-password} - volumes: - - postgres_data:/var/lib/postgresql/data - networks: - - gnx-network - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-gnx}"] - interval: 10s - timeout: 5s - retries: 5 - - backend: - build: - context: ./backEnd - dockerfile: Dockerfile - container_name: gnx-backend - restart: unless-stopped - ports: - - "1086:1086" - env_file: - - .env.production - environment: - - DEBUG=False - - SECRET_KEY=${SECRET_KEY:-change-this-in-production} - - ALLOWED_HOSTS=${ALLOWED_HOSTS:-localhost,127.0.0.1,backend} - - DATABASE_URL=${DATABASE_URL:-postgresql://${POSTGRES_USER:-gnx}:${POSTGRES_PASSWORD:-change-this-password}@postgres:5432/${POSTGRES_DB:-gnxdb}} - - ADMIN_ALLOWED_IPS=${ADMIN_ALLOWED_IPS:-193.194.155.249} - - INTERNAL_API_KEY=${INTERNAL_API_KEY} - - EMAIL_BACKEND=${EMAIL_BACKEND:-django.core.mail.backends.console.EmailBackend} - - EMAIL_HOST=${EMAIL_HOST} - - EMAIL_PORT=${EMAIL_PORT:-587} - - EMAIL_USE_TLS=${EMAIL_USE_TLS:-True} - - EMAIL_HOST_USER=${EMAIL_HOST_USER} - - EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD} - - DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL:-noreply@gnxsoft.com} - - COMPANY_EMAIL=${COMPANY_EMAIL:-contact@gnxsoft.com} - volumes: - - ./backEnd/media:/app/media - - 
./backEnd/staticfiles:/app/staticfiles - - ./backEnd/logs:/app/logs - depends_on: - postgres: - condition: service_healthy - networks: - - gnx-network - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:1086/admin/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - - frontend: - build: - context: ./frontEnd - dockerfile: Dockerfile - container_name: gnx-frontend - restart: unless-stopped - ports: - - "1087:1087" - env_file: - - .env.production - environment: - - NODE_ENV=production - - DOCKER_ENV=true - - NEXT_PUBLIC_API_URL=http://backend:1086 - - PORT=1087 - depends_on: - - backend - networks: - - gnx-network - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:1087/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - -networks: - gnx-network: - driver: bridge - -volumes: - postgres_data: - driver: local - media: - staticfiles: - diff --git a/docker-start.sh b/docker-start.sh deleted file mode 100755 index 8caedeeb..00000000 --- a/docker-start.sh +++ /dev/null @@ -1,240 +0,0 @@ -#!/bin/bash -# Docker startup script for GNX Web Application -# This script handles automatic setup, permissions, and startup - -set -e - -echo "๐Ÿš€ Starting GNX Web Application..." -echo "" - -# Set proper permissions for scripts and directories -echo "๐Ÿ”ง Setting up permissions..." 
- -# Make scripts executable -chmod +x docker-start.sh 2>/dev/null || true -chmod +x migrate-data.sh 2>/dev/null || true -chmod +x migrate-sqlite-to-postgres.sh 2>/dev/null || true - -# Set permissions for directories -mkdir -p backEnd/media backEnd/staticfiles backEnd/logs backups -chmod 755 backEnd/media backEnd/staticfiles backEnd/logs backups 2>/dev/null || true - -# Set permissions for database file if it exists -if [ -f "backEnd/db.sqlite3" ]; then - chmod 644 backEnd/db.sqlite3 2>/dev/null || true -fi - -# Set permissions for .env files -if [ -f ".env.production" ]; then - chmod 600 .env.production 2>/dev/null || true -fi - -echo "โœ… Permissions set" -echo "" - -# Check if .env.production exists -if [ ! -f .env.production ]; then - echo "โš ๏ธ Warning: .env.production not found. Creating from example..." - if [ -f .env.production.example ]; then - cp .env.production.example .env.production - echo "๐Ÿ“ Please edit .env.production with your actual values before continuing." - exit 1 - else - echo "โŒ Error: .env.production.example not found!" - exit 1 - fi -fi - -# Load environment variables -export $(cat .env.production | grep -v '^#' | xargs) - -# Configure Nginx -echo "๐Ÿ”ง Configuring Nginx..." - -# Check for existing nginx configs for gnxsoft -NGINX_AVAILABLE="/etc/nginx/sites-available/gnxsoft" -NGINX_ENABLED="/etc/nginx/sites-enabled/gnxsoft" -NGINX_CONF="nginx.conf" - -# Check if nginx.conf exists -if [ ! -f "$NGINX_CONF" ]; then - echo "โŒ Error: nginx.conf not found in current directory!" - exit 1 -fi - -# Backup and remove old configs if they exist -if [ -f "$NGINX_AVAILABLE" ]; then - echo "๐Ÿ“ฆ Backing up existing nginx config..." - sudo cp "$NGINX_AVAILABLE" "${NGINX_AVAILABLE}.backup.$(date +%Y%m%d_%H%M%S)" - echo "โœ… Old config backed up" -fi - -if [ -L "$NGINX_ENABLED" ]; then - echo "๐Ÿ”— Removing old symlink..." 
- sudo rm -f "$NGINX_ENABLED" -fi - -# Check for other gnxsoft configs and remove them -for file in /etc/nginx/sites-available/gnxsoft* /etc/nginx/sites-enabled/gnxsoft*; do - if [ -f "$file" ] || [ -L "$file" ]; then - if [ "$file" != "$NGINX_AVAILABLE" ] && [ "$file" != "$NGINX_ENABLED" ]; then - echo "๐Ÿ—‘๏ธ Removing old config: $file" - sudo rm -f "$file" - fi - fi -done - -# Copy new nginx config -echo "๐Ÿ“‹ Installing new nginx configuration..." -sudo cp "$NGINX_CONF" "$NGINX_AVAILABLE" - -# Create symlink -echo "๐Ÿ”— Creating symlink..." -sudo ln -sf "$NGINX_AVAILABLE" "$NGINX_ENABLED" - -# Update paths in nginx config if needed (using current directory) -CURRENT_DIR=$(pwd) -echo "๐Ÿ“ Updating paths in nginx config..." -sudo sed -i "s|/home/gnx/Desktop/GNX-WEB|$CURRENT_DIR|g" "$NGINX_AVAILABLE" - -# Generate or get INTERNAL_API_KEY -if [ -z "$INTERNAL_API_KEY" ] || [ "$INTERNAL_API_KEY" = "your-generated-key-here" ]; then - echo "๐Ÿ”‘ Generating new INTERNAL_API_KEY..." - INTERNAL_API_KEY=$(python3 -c "import secrets; print(secrets.token_urlsafe(32))" 2>/dev/null || openssl rand -base64 32 | tr -d "=+/" | cut -c1-32) - - # Update .env.production with the generated key - if [ -f .env.production ]; then - if grep -q "INTERNAL_API_KEY=" .env.production; then - sed -i "s|INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" .env.production - else - echo "INTERNAL_API_KEY=$INTERNAL_API_KEY" >> .env.production - fi - echo "โœ… Updated .env.production with generated INTERNAL_API_KEY" - fi - - # Export for use in this script - export INTERNAL_API_KEY -fi - -# Set INTERNAL_API_KEY in nginx config -echo "๐Ÿ”‘ Setting INTERNAL_API_KEY in nginx config..." -sudo sed -i "s|PLACEHOLDER_INTERNAL_API_KEY|$INTERNAL_API_KEY|g" "$NGINX_AVAILABLE" -echo "โœ… INTERNAL_API_KEY configured in nginx" - -# Test nginx configuration -echo "๐Ÿงช Testing nginx configuration..." -if sudo nginx -t; then - echo "โœ… Nginx configuration is valid" - echo "๐Ÿ”„ Reloading nginx..." 
- sudo systemctl reload nginx - echo "โœ… Nginx reloaded successfully" -else - echo "โŒ Nginx configuration test failed!" - echo "โš ๏ธ Please check the configuration manually" - exit 1 -fi - -# Build images -echo "๐Ÿ”จ Building Docker images..." -docker-compose build - -# Start containers -echo "โ–ถ๏ธ Starting containers..." -docker-compose up -d - -# Wait for services to be ready -echo "โณ Waiting for services to start..." -sleep 10 - -# Wait for PostgreSQL to be ready (if using PostgreSQL) -if echo "$DATABASE_URL" | grep -q "postgresql://"; then - echo "โณ Waiting for PostgreSQL to be ready..." - timeout=30 - while [ $timeout -gt 0 ]; do - if docker-compose exec -T postgres pg_isready -U ${POSTGRES_USER:-gnx} > /dev/null 2>&1; then - echo "โœ… PostgreSQL is ready" - break - fi - echo " Waiting for PostgreSQL... ($timeout seconds remaining)" - sleep 2 - timeout=$((timeout - 2)) - done - if [ $timeout -le 0 ]; then - echo "โš ๏ธ Warning: PostgreSQL may not be ready, but continuing..." - fi - - # Check if we need to migrate from SQLite - if [ -f "./backEnd/db.sqlite3" ] && [ ! -f ".migrated_to_postgres" ]; then - echo "" - echo "๐Ÿ”„ SQLite database detected. Checking if migration is needed..." 
- - # Check if PostgreSQL database is empty (only has default tables) - POSTGRES_TABLES=$(docker-compose exec -T backend python manage.py shell -c " -from django.db import connection -cursor = connection.cursor() -cursor.execute(\"SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name NOT LIKE 'django_%'\") -print(cursor.fetchone()[0]) -" 2>/dev/null | tail -1 || echo "0") - - # Check if SQLite has data - SQLITE_HAS_DATA=$(docker-compose exec -T backend bash -c " -export DATABASE_URL=sqlite:///db.sqlite3 -python manage.py shell -c \" -from django.contrib.auth.models import User -from django.db import connection -cursor = connection.cursor() -cursor.execute('SELECT name FROM sqlite_master WHERE type=\"table\" AND name NOT LIKE \"sqlite_%\" AND name NOT LIKE \"django_%\"') -tables = cursor.fetchall() -has_data = False -for table in tables: - cursor.execute(f'SELECT COUNT(*) FROM {table[0]}') - if cursor.fetchone()[0] > 0: - has_data = True - break -print('1' if has_data else '0') -\" 2>/dev/null -" | tail -1 || echo "0") - - if [ "$SQLITE_HAS_DATA" = "1" ] && [ "$POSTGRES_TABLES" = "0" ] || [ "$POSTGRES_TABLES" -lt 5 ]; then - echo "๐Ÿ“ฆ SQLite database has data. Starting migration to PostgreSQL..." - echo " This may take a few minutes..." - echo "" - - # Run migration script - if [ -f "./migrate-sqlite-to-postgres.sh" ]; then - ./migrate-sqlite-to-postgres.sh - else - echo "โš ๏ธ Migration script not found. Please run manually:" - echo " ./migrate-sqlite-to-postgres.sh" - fi - else - echo "โœ… No migration needed (PostgreSQL already has data or SQLite is empty)" - touch .migrated_to_postgres - fi - fi -fi - -# Run migrations -echo "๐Ÿ“ฆ Running database migrations..." -docker-compose exec -T backend python manage.py migrate --noinput - -# Collect static files -echo "๐Ÿ“ Collecting static files..." -docker-compose exec -T backend python manage.py collectstatic --noinput - -# Check health -echo "๐Ÿฅ Checking service health..." 
-docker-compose ps - -echo "" -echo "โœ… GNX Web Application is running!" -echo "" -echo "Backend: http://localhost:1086" -echo "Frontend: http://localhost:1087" -echo "Nginx: Configured and running" -echo "" -echo "View logs: docker-compose logs -f" -echo "Stop services: docker-compose down" -echo "" -echo "๐Ÿ“‹ Nginx config location: $NGINX_AVAILABLE" - diff --git a/frontEnd/.dockerignore b/frontEnd/.dockerignore deleted file mode 100644 index d7763bd9..00000000 --- a/frontEnd/.dockerignore +++ /dev/null @@ -1,26 +0,0 @@ -node_modules -.next -.git -.gitignore -*.log -.env -.env.local -.env.development.local -.env.test.local -.env.production.local -npm-debug.log* -yarn-debug.log* -yarn-error.log* -.DS_Store -.vscode -.idea -*.swp -*.swo -*~ -coverage -.nyc_output -dist -build -README.md -*.md - diff --git a/frontEnd/Dockerfile b/frontEnd/Dockerfile deleted file mode 100644 index 4b5a3777..00000000 --- a/frontEnd/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# Next.js Frontend Dockerfile -FROM node:20-alpine AS base - -# Install dependencies only when needed -FROM base AS deps -RUN apk add --no-cache libc6-compat -WORKDIR /app - -# Copy package files -COPY package*.json ./ -RUN npm ci - -# Rebuild the source code only when needed -FROM base AS builder -WORKDIR /app -COPY --from=deps /app/node_modules ./node_modules -COPY . . 
- -# Set environment variables for build -ENV NEXT_TELEMETRY_DISABLED=1 -ENV NODE_ENV=production - -# Build Next.js -RUN npm run build - -# Production image, copy all the files and run next -FROM base AS runner -WORKDIR /app - -ENV NODE_ENV=production -ENV NEXT_TELEMETRY_DISABLED=1 - -RUN addgroup --system --gid 1001 nodejs -RUN adduser --system --uid 1001 nextjs - -# Copy necessary files from builder -COPY --from=builder /app/public ./public -COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ -COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static - -USER nextjs - -EXPOSE 1087 - -ENV PORT=1087 -ENV HOSTNAME="0.0.0.0" - -# Use the standalone server -CMD ["node", "server.js"] - diff --git a/frontEnd/app/services/[slug]/page.tsx b/frontEnd/app/services/[slug]/page.tsx index e544c33a..df3168c8 100644 --- a/frontEnd/app/services/[slug]/page.tsx +++ b/frontEnd/app/services/[slug]/page.tsx @@ -19,6 +19,11 @@ interface ServicePageProps { }>; } +// Force static generation - pages are pre-rendered at build time +export const dynamic = 'force-static'; +export const dynamicParams = false; // Return 404 for unknown slugs +export const revalidate = false; // Never revalidate - fully static + // Generate static params for all services (optional - for better performance) export async function generateStaticParams() { try { @@ -27,6 +32,7 @@ export async function generateStaticParams() { slug: service.slug, })); } catch (error) { + console.error('Error generating static params for services:', error); return []; } } diff --git a/frontEnd/app/support-center/page.tsx b/frontEnd/app/support-center/page.tsx index 1feba094..e5938b37 100644 --- a/frontEnd/app/support-center/page.tsx +++ b/frontEnd/app/support-center/page.tsx @@ -28,7 +28,8 @@ const SupportCenterPage = () => { url: "/support-center", }); - document.title = metadata.title || "Support Center | GNX Soft"; + const titleString = typeof metadata.title === 'string' ? 
metadata.title : "Support Center | GNX Soft"; + document.title = titleString; let metaDescription = document.querySelector('meta[name="description"]'); if (!metaDescription) { diff --git a/frontEnd/components/pages/case-study/Process.tsx b/frontEnd/components/pages/case-study/Process.tsx index 85a07bed..75e18783 100644 --- a/frontEnd/components/pages/case-study/Process.tsx +++ b/frontEnd/components/pages/case-study/Process.tsx @@ -12,6 +12,8 @@ const Process = ({ slug }: ProcessProps) => { return null; } + const processSteps = caseStudy.process_steps; + return (
@@ -28,7 +30,7 @@ const Process = ({ slug }: ProcessProps) => {
- {caseStudy.process_steps.map((step, index) => ( + {processSteps.map((step, index) => (
{String(step.step_number).padStart(2, '0')} @@ -37,7 +39,7 @@ const Process = ({ slug }: ProcessProps) => {

{step.title}

{step.description}

- {index < caseStudy.process_steps.length - 1 && ( + {index < processSteps.length - 1 && (
)}
diff --git a/frontEnd/components/pages/support/KnowledgeBase.tsx b/frontEnd/components/pages/support/KnowledgeBase.tsx index 4198db21..35c4c5bb 100644 --- a/frontEnd/components/pages/support/KnowledgeBase.tsx +++ b/frontEnd/components/pages/support/KnowledgeBase.tsx @@ -27,7 +27,7 @@ const KnowledgeBase = () => { const filtered = allArticles.filter(article => article.title.toLowerCase().includes(searchTerm.toLowerCase()) || article.summary.toLowerCase().includes(searchTerm.toLowerCase()) || - article.content.toLowerCase().includes(searchTerm.toLowerCase()) + (article.content && article.content.toLowerCase().includes(searchTerm.toLowerCase())) ); return { displayArticles: filtered, diff --git a/frontEnd/components/shared/layout/animations/SmoothScroll.tsx b/frontEnd/components/shared/layout/animations/SmoothScroll.tsx index 6d859d33..7169aa0d 100644 --- a/frontEnd/components/shared/layout/animations/SmoothScroll.tsx +++ b/frontEnd/components/shared/layout/animations/SmoothScroll.tsx @@ -70,7 +70,6 @@ const SmoothScroll = () => { gestureOrientation: 'vertical', smoothWheel: true, wheelMultiplier: 1, - smoothTouch: false, touchMultiplier: 2, infinite: false, }); diff --git a/frontEnd/lib/api/serviceService.ts b/frontEnd/lib/api/serviceService.ts index 92d691d3..5bf19f5e 100644 --- a/frontEnd/lib/api/serviceService.ts +++ b/frontEnd/lib/api/serviceService.ts @@ -1,4 +1,4 @@ -import { API_CONFIG } from '../config/api'; +import { API_CONFIG, getApiHeaders } from '../config/api'; // Types for Service API export interface ServiceFeature { @@ -104,9 +104,7 @@ export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -134,9 +132,7 @@ export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -164,9 +160,7 @@ 
export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -194,9 +188,7 @@ export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -224,9 +216,7 @@ export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -254,9 +244,7 @@ export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -284,9 +272,7 @@ export const serviceService = { const response = await fetch(url, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), }); if (!response.ok) { @@ -442,21 +428,30 @@ export const serviceUtils = { }, // Get service image URL + // Use relative URLs for same-domain images (Next.js can optimize via rewrites) + // Use absolute URLs only for external images getServiceImageUrl: (service: Service): string => { // If service has an uploaded image if (service.image && typeof service.image === 'string' && service.image.startsWith('/media/')) { - return `${API_CONFIG.BASE_URL}${service.image}`; + // Use relative URL - Next.js rewrite will handle fetching from backend during optimization + return service.image; } // If service has an image_url if (service.image_url) { if (service.image_url.startsWith('http')) { + // External URL - keep as absolute return service.image_url; } - return `${API_CONFIG.BASE_URL}${service.image_url}`; + if (service.image_url.startsWith('/media/')) { + // Same domain media - use relative URL + return service.image_url; + } + // Other relative URLs + return service.image_url; } 
- // Fallback to default image + // Fallback to default image (relative is fine for public images) return '/images/service/default.png'; }, diff --git a/frontEnd/lib/config/api.ts b/frontEnd/lib/config/api.ts index d27b5214..73ab9227 100644 --- a/frontEnd/lib/config/api.ts +++ b/frontEnd/lib/config/api.ts @@ -6,17 +6,62 @@ * In Production: Uses Next.js rewrites/nginx proxy at /api (internal network only) */ -// Production: Use relative URLs (nginx proxy) -// Development: Use full backend URL -// Docker: Use backend service name or port 1086 +// Production: Use relative URLs (nginx proxy) for client-side +// For server-side (SSR), use internal backend URL or public domain const isProduction = process.env.NODE_ENV === 'production'; -const isDocker = process.env.DOCKER_ENV === 'true'; -export const API_BASE_URL = isDocker - ? (process.env.NEXT_PUBLIC_API_URL || 'http://backend:1086') - : isProduction - ? '' // Use relative URLs in production (proxied by nginx) - : (process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8000'); +// Detect if we're on the server (Node.js) or client (browser) +const isServer = typeof window === 'undefined'; + +// For server-side rendering, we need an absolute URL +// During build time, use internal backend URL directly (faster, no SSL issues) +// At runtime, use public domain (goes through nginx which adds API key header) +const getServerApiUrl = () => { + if (isProduction) { + // Check if we're in build context (no access to window, and NEXT_PHASE might be set) + // During build, use internal backend URL directly + // At runtime (SSR), use public domain through nginx + const isBuildTime = process.env.NEXT_PHASE === 'phase-production-build' || + !process.env.NEXT_RUNTIME; + + if (isBuildTime) { + // Build time: use internal backend URL directly + return process.env.INTERNAL_API_URL || 'http://127.0.0.1:1086'; + } else { + // Runtime SSR: use public domain - nginx will proxy and add API key header + return process.env.NEXT_PUBLIC_SITE_URL 
|| 'https://gnxsoft.com'; + } + } + return process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; +}; + +// For client-side, use relative URLs in production (proxied by nginx) +// For server-side, use absolute URLs +export const API_BASE_URL = isServer + ? getServerApiUrl() // Server-side: absolute URL + : (isProduction + ? '' // Client-side production: relative URLs (proxied by nginx) + : (process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086')); // Development: direct backend + +// Internal API key for server-side requests (must match backend INTERNAL_API_KEY) +// This is only used when calling backend directly (build time or internal requests) +export const INTERNAL_API_KEY = process.env.INTERNAL_API_KEY || '9hZtPwyScigoBAl59Uvcz_9VztSRC6Zt_6L1B2xTM2M'; + +// Helper to get headers for API requests +// Adds API key header when calling internal backend directly +export const getApiHeaders = (): Record => { + const headers: Record = { + 'Content-Type': 'application/json', + }; + + // If we're calling the internal backend directly (not through nginx), + // add the API key header + if (isServer && API_BASE_URL.includes('127.0.0.1:1086')) { + headers['X-Internal-API-Key'] = INTERNAL_API_KEY; + } + + return headers; +}; export const API_CONFIG = { // Django API Base URL diff --git a/frontEnd/next.config.js b/frontEnd/next.config.js index 10314235..5a9e1aa7 100644 --- a/frontEnd/next.config.js +++ b/frontEnd/next.config.js @@ -1,8 +1,10 @@ /** @type {import('next').NextConfig} */ const nextConfig = { - // Enable standalone output for Docker + // Enable standalone output for optimized production deployment output: 'standalone', images: { + // Enable image optimization in standalone mode + unoptimized: false, remotePatterns: [ { protocol: 'http', @@ -33,15 +35,60 @@ const nextConfig = { hostname: 'images.unsplash.com', pathname: '/**', }, - // Add your production domain when ready - // { - // protocol: 'https', - // hostname: 'your-api-domain.com', - // pathname: 
'/media/**', - // }, + // Production domain configuration + { + protocol: 'https', + hostname: 'gnxsoft.com', + pathname: '/media/**', + }, + { + protocol: 'https', + hostname: 'gnxsoft.com', + pathname: '/images/**', + }, + { + protocol: 'https', + hostname: 'gnxsoft.com', + pathname: '/_next/static/**', + }, + { + protocol: 'http', + hostname: 'gnxsoft.com', + pathname: '/media/**', + }, + { + protocol: 'http', + hostname: 'gnxsoft.com', + pathname: '/images/**', + }, + { + protocol: 'https', + hostname: 'www.gnxsoft.com', + pathname: '/media/**', + }, + { + protocol: 'https', + hostname: 'www.gnxsoft.com', + pathname: '/images/**', + }, + { + protocol: 'https', + hostname: 'www.gnxsoft.com', + pathname: '/_next/static/**', + }, + { + protocol: 'http', + hostname: 'www.gnxsoft.com', + pathname: '/media/**', + }, + { + protocol: 'http', + hostname: 'www.gnxsoft.com', + pathname: '/images/**', + }, ], // Legacy domains format for additional compatibility - domains: ['images.unsplash.com'], + domains: ['images.unsplash.com', 'gnxsoft.com', 'www.gnxsoft.com'], formats: ['image/avif', 'image/webp'], deviceSizes: [640, 750, 828, 1080, 1200, 1920, 2048, 3840], imageSizes: [16, 32, 48, 64, 96, 128, 256, 384], @@ -99,7 +146,7 @@ const nextConfig = { }, { key: 'Content-Security-Policy', - value: "default-src 'self'; script-src 'self' 'unsafe-eval' 'unsafe-inline' https://www.googletagmanager.com https://www.google-analytics.com; style-src 'self' 'unsafe-inline'; img-src 'self' data: https: http://localhost:8000 http://localhost:8080; font-src 'self' data:; connect-src 'self' http://localhost:8000 https://www.google-analytics.com; frame-src 'self' https://www.google.com; frame-ancestors 'self'; base-uri 'self'; form-action 'self'" + value: "default-src 'self'; script-src 'self' 'unsafe-eval' 'unsafe-inline' https://www.googletagmanager.com https://www.google-analytics.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https: 
http://localhost:8000 http://localhost:8080; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' http://localhost:8000 https://www.google-analytics.com; frame-src 'self' https://www.google.com; frame-ancestors 'self'; base-uri 'self'; form-action 'self'" }, // Performance Headers { @@ -153,7 +200,6 @@ const nextConfig = { // Rewrites for API proxy (Production: routes /api to backend through nginx) async rewrites() { // In development, proxy to Django backend - // In production, nginx handles this if (process.env.NODE_ENV === 'development') { return [ { @@ -166,8 +212,14 @@ const nextConfig = { }, ] } - // In production, these are handled by nginx reverse proxy - return [] + // In production, add rewrite for media files so Next.js image optimization can access them + // This allows Next.js to fetch media images from the internal backend during optimization + return [ + { + source: '/media/:path*', + destination: `${process.env.INTERNAL_API_URL || 'http://127.0.0.1:1086'}/media/:path*`, + }, + ] }, } diff --git a/install-postgresql.sh b/install-postgresql.sh new file mode 100755 index 00000000..0d1eaa2e --- /dev/null +++ b/install-postgresql.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# PostgreSQL Installation and Configuration Script for GNX-WEB +# This script installs PostgreSQL and configures it to use port 5433 +# to avoid conflicts with Docker PostgreSQL instance on port 5432 + +set -e + +echo "==========================================" +echo "PostgreSQL Installation Script" +echo "==========================================" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Check if running as root +if [ "$EUID" -ne 0 ]; then + echo -e "${RED}Please run as root (use sudo)${NC}" + exit 1 +fi + +# Update package list +echo -e "${GREEN}[1/7] Updating package list...${NC}" +apt-get update + +# Install PostgreSQL +echo -e "${GREEN}[2/7] Installing PostgreSQL...${NC}" +apt-get install -y postgresql 
postgresql-contrib + +# Get PostgreSQL version +PG_VERSION=$(psql --version | grep -oP '\d+' | head -1) +PG_MAJOR_VERSION=$(echo $PG_VERSION | cut -d. -f1) + +echo -e "${GREEN}[3/7] PostgreSQL version: $PG_VERSION${NC}" + +# Find postgresql.conf file +PG_CONF="/etc/postgresql/$PG_MAJOR_VERSION/main/postgresql.conf" + +if [ ! -f "$PG_CONF" ]; then + echo -e "${RED}Error: Could not find PostgreSQL configuration file${NC}" + exit 1 +fi + +# Backup original configuration +echo -e "${GREEN}[4/7] Backing up PostgreSQL configuration...${NC}" +cp "$PG_CONF" "${PG_CONF}.backup.$(date +%Y%m%d_%H%M%S)" + +# Configure PostgreSQL to use port 5433 +echo -e "${GREEN}[5/7] Configuring PostgreSQL to use port 5433...${NC}" + +# Check if port is already set +if grep -q "^port = " "$PG_CONF"; then + # Replace existing port setting + sed -i "s/^port = .*/port = 5433/" "$PG_CONF" +else + # Add port setting + echo "port = 5433" >> "$PG_CONF" +fi + +# Restart PostgreSQL +echo -e "${GREEN}[6/7] Restarting PostgreSQL...${NC}" +systemctl restart postgresql + +# Wait for PostgreSQL to start +sleep 2 + +# Verify PostgreSQL is running on port 5433 +if netstat -tlnp 2>/dev/null | grep -q ":5433" || ss -tlnp 2>/dev/null | grep -q ":5433"; then + echo -e "${GREEN}[7/7] PostgreSQL is running on port 5433${NC}" +else + echo -e "${YELLOW}Warning: Could not verify PostgreSQL is running on port 5433${NC}" +fi + +echo "" +echo -e "${GREEN}==========================================" +echo "PostgreSQL Installation Complete!" +echo "==========================================${NC}" +echo "" +echo "Next steps:" +echo "1. Create database and user:" +echo " sudo -u postgres psql" +echo " CREATE DATABASE gnx_db;" +echo " CREATE USER gnx_user WITH PASSWORD 'your_password';" +echo " GRANT ALL PRIVILEGES ON DATABASE gnx_db TO gnx_user;" +echo " \\q" +echo "" +echo "2. 
Update your .env file with:" +echo " DATABASE_URL=postgresql://gnx_user:your_password@localhost:5433/gnx_db" +echo "" + diff --git a/migrate-data.sh b/migrate-data.sh deleted file mode 100755 index dc53d0e1..00000000 --- a/migrate-data.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -# Simplified script to migrate SQLite data to PostgreSQL - -set -e - -echo "๐Ÿ”„ Migrating data from SQLite to PostgreSQL..." - -# Load environment -if [ -f .env.production ]; then - export $(cat .env.production | grep -v '^#' | xargs) -fi - -# Check if SQLite exists -if [ ! -f "./backEnd/db.sqlite3" ]; then - echo "โŒ SQLite database not found" - exit 1 -fi - -# Ensure containers are running -if ! docker-compose ps | grep -q "backend.*Up"; then - echo "โ–ถ๏ธ Starting containers..." - docker-compose up -d - sleep 10 -fi - -# Wait for PostgreSQL -echo "โณ Waiting for PostgreSQL..." -timeout=30 -while [ $timeout -gt 0 ]; do - if docker-compose exec -T postgres pg_isready -U ${POSTGRES_USER:-gnx} > /dev/null 2>&1; then - break - fi - sleep 2 - timeout=$((timeout - 2)) -done - -# Create backup directory -mkdir -p ./backups -BACKUP_FILE="./backups/sqlite_export_$(date +%Y%m%d_%H%M%S).json" - -echo "๐Ÿ“ฆ Exporting from SQLite..." - -# Export using SQLite database -docker-compose exec -T backend bash -c " - # Temporarily use SQLite - export DATABASE_URL=sqlite:///db.sqlite3 - python manage.py dumpdata --natural-foreign --natural-primary \ - --exclude auth.permission \ - --exclude contenttypes \ - --indent 2 > /tmp/sqlite_export.json 2>&1 - cat /tmp/sqlite_export.json -" > "$BACKUP_FILE" - -echo "โœ… Exported to $BACKUP_FILE" - -# Run migrations on PostgreSQL -echo "๐Ÿ“ฆ Running migrations on PostgreSQL..." -docker-compose exec -T backend python manage.py migrate --noinput - -# Import into PostgreSQL -echo "๐Ÿ“ฅ Importing into PostgreSQL..." 
-docker-compose exec -T backend bash -c " - python manage.py loaddata /tmp/sqlite_export.json 2>&1 || echo 'Import completed with warnings' -" - -echo "โœ… Migration completed!" -echo "" -echo "๐Ÿ“Š Verifying migration..." - -# Count records -echo " Checking user count..." -USERS=$(docker-compose exec -T backend python manage.py shell -c "from django.contrib.auth.models import User; print(User.objects.count())" 2>/dev/null | tail -1) -echo " Users in PostgreSQL: $USERS" - -touch .migrated_to_postgres -echo "" -echo "โœ… Migration complete! Backend is now using PostgreSQL." - diff --git a/migrate-sqlite-to-postgres.sh b/migrate-sqlite-to-postgres.sh deleted file mode 100755 index 4fde73eb..00000000 --- a/migrate-sqlite-to-postgres.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/bin/bash -# Script to migrate data from SQLite to PostgreSQL - -set -e - -echo "๐Ÿ”„ Starting SQLite to PostgreSQL Migration..." - -# Check if SQLite database exists -SQLITE_DB="./backEnd/db.sqlite3" -if [ ! -f "$SQLITE_DB" ]; then - echo "โŒ SQLite database not found at $SQLITE_DB" - exit 1 -fi - -echo "โœ… Found SQLite database" - -# Check if PostgreSQL is running -if ! docker-compose ps postgres | grep -q "Up"; then - echo "โŒ PostgreSQL container is not running. Please start it first:" - echo " docker-compose up -d postgres" - exit 1 -fi - -echo "โœ… PostgreSQL container is running" - -# Load environment variables -if [ -f .env.production ]; then - export $(cat .env.production | grep -v '^#' | xargs) -fi - -# Check if DATABASE_URL is set for PostgreSQL -if [ -z "$DATABASE_URL" ] || ! 
echo "$DATABASE_URL" | grep -q "postgresql://"; then - echo "โŒ DATABASE_URL is not set to PostgreSQL" - echo " Please update .env.production with PostgreSQL DATABASE_URL" - exit 1 -fi - -echo "โœ… PostgreSQL DATABASE_URL is configured" - -# Create backup directory -BACKUP_DIR="./backups" -mkdir -p "$BACKUP_DIR" -TIMESTAMP=$(date +%Y%m%d_%H%M%S) -BACKUP_FILE="$BACKUP_DIR/sqlite_backup_$TIMESTAMP.json" - -echo "๐Ÿ“ฆ Exporting data from SQLite..." -echo " Backup will be saved to: $BACKUP_FILE" - -# Export data from SQLite using Django's dumpdata -# First, temporarily switch to SQLite -docker-compose exec -T backend bash -c " - export DATABASE_URL=sqlite:///db.sqlite3 - python manage.py dumpdata --natural-foreign --natural-primary --exclude auth.permission --exclude contenttypes > /tmp/sqlite_export.json 2>&1 || true - cat /tmp/sqlite_export.json -" > "$BACKUP_FILE" - -# Check if export was successful -if [ ! -s "$BACKUP_FILE" ] || grep -q "Error\|Traceback\|Exception" "$BACKUP_FILE"; then - echo "โš ๏ธ Warning: Export may have issues, but continuing..." -fi - -echo "โœ… Data exported to $BACKUP_FILE" -echo " File size: $(du -h "$BACKUP_FILE" | cut -f1)" - -# Wait for PostgreSQL to be ready -echo "โณ Waiting for PostgreSQL to be ready..." -timeout=30 -while [ $timeout -gt 0 ]; do - if docker-compose exec -T postgres pg_isready -U ${POSTGRES_USER:-gnx} > /dev/null 2>&1; then - echo "โœ… PostgreSQL is ready" - break - fi - echo " Waiting for PostgreSQL... ($timeout seconds remaining)" - sleep 2 - timeout=$((timeout - 2)) -done - -if [ $timeout -le 0 ]; then - echo "โŒ PostgreSQL is not ready. Please check the logs:" - echo " docker-compose logs postgres" - exit 1 -fi - -# Create database if it doesn't exist -echo "๐Ÿ“Š Ensuring PostgreSQL database exists..." 
-docker-compose exec -T postgres psql -U ${POSTGRES_USER:-gnx} -d postgres -c "SELECT 1 FROM pg_database WHERE datname='${POSTGRES_DB:-gnxdb}'" | grep -q 1 || \ -docker-compose exec -T postgres psql -U ${POSTGRES_USER:-gnx} -d postgres -c "CREATE DATABASE ${POSTGRES_DB:-gnxdb};" - -echo "โœ… Database exists or created" - -# Run migrations on PostgreSQL -echo "๐Ÿ“ฆ Running migrations on PostgreSQL..." -docker-compose exec -T backend python manage.py migrate --noinput - -echo "โœ… Migrations completed" - -# Import data into PostgreSQL -echo "๐Ÿ“ฅ Importing data into PostgreSQL..." -if docker-compose exec -T backend bash -c "python manage.py loaddata /tmp/sqlite_export.json" < "$BACKUP_FILE" 2>&1 | tee /tmp/import_log.txt; then - echo "โœ… Data imported successfully" -else - echo "โš ๏ธ Warning: Some data may not have imported. Check the log above." - echo " You can retry the import manually:" - echo " docker-compose exec backend python manage.py loaddata /tmp/sqlite_export.json" -fi - -# Verify data transfer -echo "๐Ÿ” Verifying data transfer..." -SQLITE_COUNT=$(docker-compose exec -T backend bash -c "export DATABASE_URL=sqlite:///db.sqlite3 && python manage.py shell -c \"from django.contrib.auth.models import User; print(User.objects.count())\"" 2>/dev/null | tail -1 || echo "0") -POSTGRES_COUNT=$(docker-compose exec -T backend python manage.py shell -c "from django.contrib.auth.models import User; print(User.objects.count())" 2>/dev/null | tail -1 || echo "0") - -echo "" -echo "๐Ÿ“Š Migration Summary:" -echo " SQLite Users: $SQLITE_COUNT" -echo " PostgreSQL Users: $POSTGRES_COUNT" -echo "" - -# Create a flag file to indicate migration is complete -touch .migrated_to_postgres - -echo "โœ… Migration completed!" -echo "" -echo "๐Ÿ“‹ Next steps:" -echo " 1. Verify the data in PostgreSQL:" -echo " docker-compose exec backend python manage.py shell" -echo "" -echo " 2. Test the application with PostgreSQL" -echo "" -echo " 3. 
Once verified, you can backup and remove SQLite:" -echo " mv backEnd/db.sqlite3 backEnd/db.sqlite3.backup" -echo "" -echo " Backup file saved at: $BACKUP_FILE" - diff --git a/nginx-gnxsoft.conf b/nginx-gnxsoft.conf index 9dbd7bd1..f4e62095 100644 --- a/nginx-gnxsoft.conf +++ b/nginx-gnxsoft.conf @@ -1,23 +1,31 @@ # Production Nginx Configuration for GNX Soft # Place this in /etc/nginx/sites-available/gnxsoft +# Symlink to /etc/nginx/sites-enabled/gnxsoft # -# DEPLOYMENT NOTES: -# 1. Frontend: Next.js production build runs on port 3000 -# - Build: npm run build -# - Start: npm start (or use PM2: pm2 start npm --name "gnxsoft-frontend" -- start) -# 2. Backend: Django runs on port 8000 (internal only) -# - Use Gunicorn: gunicorn gnx.wsgi:application --bind 127.0.0.1:8000 -# - Or PM2: pm2 start gunicorn --name "gnxsoft-backend" -- gnx.wsgi:application --bind 127.0.0.1:8000 +# DEPLOYMENT NOTES (Host Deployment): +# 1. Frontend: Next.js production build runs on port 1087 +# - Build: cd frontEnd && npm run build +# - Start: Use start-services.sh script or PM2: PORT=1087 pm2 start npm --name "gnxsoft-frontend" -- start +# 2. Backend: Django runs on port 1086 (internal only) +# - Use start-services.sh script or PM2: gunicorn gnx.wsgi:application --bind 127.0.0.1:1086 --workers 3 +# 3. Database: PostgreSQL on host (port 5433 to avoid conflict with Docker instance on 5432) +# 4. Use install-postgresql.sh to install and configure PostgreSQL +# 5. 
Use start-services.sh to start both backend and frontend services +# +# NOTE: Rate limiting zones must be defined in the main nginx.conf http context +# Add these lines to /etc/nginx/nginx.conf inside the http {} block: +# limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s; +# limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s; -# Frontend - Public facing (Next.js Production Server) +# Frontend - Public facing (Next.js Production Server on port 1087) upstream frontend { - server 127.0.0.1:3000; + server 127.0.0.1:1087; keepalive 64; } -# Backend - Internal only (Django) +# Backend - Internal only (Django on port 1086) upstream backend_internal { - server 127.0.0.1:8000; + server 127.0.0.1:1086; keepalive 64; } @@ -61,9 +69,7 @@ server { add_header Referrer-Policy "strict-origin-when-cross-origin" always; add_header Permissions-Policy "camera=(), microphone=(), geolocation=(), interest-cohort=()" always; - # Rate Limiting Zones - limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s; - limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s; + # Rate Limiting (zones must be defined in main nginx.conf http context) limit_req_status 429; # Client settings @@ -75,7 +81,134 @@ server { access_log /var/log/nginx/gnxsoft_access.log; error_log /var/log/nginx/gnxsoft_error.log warn; - # Root location - Frontend (Next.js) + # IMPORTANT: More specific location blocks MUST come before location / + # Order matters in nginx - longest match wins + + # API Proxy - Frontend talks to backend ONLY through this internal proxy + # Backend port 1086 is BLOCKED from internet by firewall + location /api/ { + limit_req zone=api_limit burst=20 nodelay; + + # Internal proxy to backend (127.0.0.1:1086) + # Backend is NOT accessible from public internet + proxy_pass http://backend_internal/api/; + proxy_http_version 1.1; + + # Add internal API key (must match INTERNAL_API_KEY in Django .env) + set $api_key 
"PLACEHOLDER_INTERNAL_API_KEY";
+        proxy_set_header X-Internal-API-Key $api_key;
+
+        # Backend sees request as coming from localhost
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header X-Forwarded-Host $host;
+        proxy_set_header X-Forwarded-Port $server_port;
+
+        # Hide backend server info
+        proxy_hide_header X-Powered-By;
+        proxy_hide_header Server;
+
+        # Timeouts
+        proxy_connect_timeout 30s;
+        proxy_send_timeout 30s;
+        proxy_read_timeout 30s;
+
+        # CORS headers (if needed)
+        add_header Access-Control-Allow-Origin "https://gnxsoft.com" always;
+        add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always;
+        add_header Access-Control-Allow-Headers "Authorization, Content-Type, X-Internal-API-Key" always;
+        add_header Access-Control-Allow-Credentials "true" always;
+
+        # Handle preflight requests
+        if ($request_method = 'OPTIONS') {
+            return 204;
+        }
+    }
+
+    # Media files (served by nginx directly for better performance)
+    location /media/ {
+        alias /var/www/GNX-WEB/backEnd/media/;
+        expires 30d;
+        add_header Cache-Control "public, immutable";
+        access_log off;
+
+        # Security
+        location ~ \.(php|py|pl|sh)$ {
+            deny all;
+        }
+    }
+
+    # Static files (served by nginx directly)
+    location /static/ {
+        alias /var/www/GNX-WEB/backEnd/staticfiles/;
+        expires 1y;
+        add_header Cache-Control "public, immutable";
+        access_log off;
+    }
+
+    # Next.js image optimization API - must be proxied to Next.js server
+    # Use regex to match /_next/image with query strings
+    location ~ ^/_next/image {
+        proxy_pass http://frontend;
+        proxy_http_version 1.1;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header X-Forwarded-Host $host;
+        proxy_set_header 
X-Forwarded-Port $server_port; + + # Preserve query string + proxy_pass_request_headers on; + + # Timeouts for image processing + proxy_connect_timeout 30s; + proxy_send_timeout 30s; + proxy_read_timeout 30s; + + # Buffer settings for image processing + proxy_buffering on; + proxy_buffer_size 4k; + proxy_buffers 8 4k; + + # Cache optimized images + proxy_cache_valid 200 1d; + add_header Cache-Control "public, max-age=86400"; + } + + # Next.js static files - serve directly from filesystem for better performance + location /_next/static/ { + alias /var/www/GNX-WEB/frontEnd/.next/static/; + expires 1y; + add_header Cache-Control "public, immutable"; + access_log off; + + # Correct MIME types + types { + text/css css; + application/javascript js; + application/json json; + font/woff2 woff2; + font/woff woff; + font/ttf ttf; + image/png png; + image/jpeg jpg jpeg; + image/webp webp; + image/svg+xml svg; + } + } + + # Frontend public images + location /images/ { + alias /var/www/GNX-WEB/frontEnd/public/images/; + expires 30d; + add_header Cache-Control "public, immutable"; + access_log off; + } + + # Root location - Frontend (Next.js) - MUST be last location / { limit_req zone=general_limit burst=50 nodelay; @@ -95,72 +228,6 @@ server { proxy_read_timeout 60s; } - # API Proxy - Frontend talks to backend ONLY through this internal proxy - # Backend port 8000 is BLOCKED from internet by firewall - location /api/ { - limit_req zone=api_limit burst=20 nodelay; - - # Internal proxy to backend (127.0.0.1:8000) - # Backend is NOT accessible from public internet - proxy_pass http://backend_internal/api/; - proxy_http_version 1.1; - - # Backend sees request as coming from localhost - proxy_set_header Host $host; - proxy_set_header X-Real-IP 127.0.0.1; - proxy_set_header X-Forwarded-For 127.0.0.1; - proxy_set_header X-Forwarded-Proto $scheme; - - # Hide backend server info - proxy_hide_header X-Powered-By; - proxy_hide_header Server; - - # Timeouts - proxy_connect_timeout 30s; - 
proxy_send_timeout 30s; - proxy_read_timeout 30s; - - # CORS headers (if needed) - add_header Access-Control-Allow-Origin "https://gnxsoft.com" always; - add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always; - add_header Access-Control-Allow-Headers "Authorization, Content-Type" always; - add_header Access-Control-Allow-Credentials "true" always; - - # Handle preflight requests - if ($request_method = 'OPTIONS') { - return 204; - } - } - - # Media files (served by nginx directly for better performance) - location /media/ { - alias /var/www/gnxsoft/media/; - expires 30d; - add_header Cache-Control "public, immutable"; - access_log off; - - # Security - location ~ \.(php|py|pl|sh)$ { - deny all; - } - } - - # Static files (served by nginx directly) - location /static/ { - alias /var/www/gnxsoft/static/; - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; - } - - # Next.js static files - location /_next/static/ { - proxy_pass http://frontend; - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; - } - # Deny access to hidden files location ~ /\. 
{
         deny all;
@@ -168,10 +235,26 @@ server {
         log_not_found off;
     }
 
-    # Deny access to backend admin (extra security)
-    location /admin {
-        deny all;
-        return 404;
+    # Admin panel - Proxy to backend (with IP restriction)
+    location /admin/ {
+        # IP restriction is handled by Django middleware
+        # Add internal API key (must match INTERNAL_API_KEY in Django .env)
+        set $api_key "PLACEHOLDER_INTERNAL_API_KEY";
+        proxy_set_header X-Internal-API-Key $api_key;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+
+        proxy_pass http://backend_internal;
+        proxy_redirect off;
+        proxy_http_version 1.1;
+        proxy_set_header Connection "";
+
+        # Timeouts
+        proxy_connect_timeout 30s;
+        proxy_send_timeout 30s;
+        proxy_read_timeout 30s;
     }
 
     # Health check endpoint
@@ -185,11 +268,13 @@
 # ==============================================================================
 # IMPORTANT SECURITY NOTES:
 # ==============================================================================
-# 1. Backend runs on 127.0.0.1:8000 (internal only)
-# 2. Firewall BLOCKS external access to port 8000
-# 3. Only nginx can reach backend (internal network)
-# 4. Public internet can ONLY access nginx (ports 80, 443)
-# 5. All API calls go through nginx proxy (/api/* → 127.0.0.1:8000/api/*)
-# 6. Backend IP whitelist middleware ensures only localhost requests
+# 1. Backend runs on 127.0.0.1:1086 (internal only)
+# 2. Frontend runs on 127.0.0.1:1087 (internal only)
+# 3. Firewall BLOCKS external access to ports 1086 and 1087
+# 4. Only nginx can reach backend/frontend (internal network)
+# 5. Public internet can ONLY access nginx (ports 80, 443)
+# 6. All API calls go through nginx proxy (/api/* → 127.0.0.1:1086/api/*)
+# 7. Backend IP whitelist middleware ensures only localhost requests
+# 8. 
Rate limiting zones must be added to /etc/nginx/nginx.conf http {} block +# 9. PostgreSQL runs on port 5433 (to avoid conflict with Docker on 5432) # ============================================================================== - diff --git a/nginx-rate-limit-zones.conf b/nginx-rate-limit-zones.conf new file mode 100644 index 00000000..94861fdf --- /dev/null +++ b/nginx-rate-limit-zones.conf @@ -0,0 +1,7 @@ +# Rate Limiting Zones for GNX-WEB +# Add these lines to /etc/nginx/nginx.conf inside the http {} block +# Or include this file in /etc/nginx/nginx.conf with: include /etc/nginx/conf.d/rate-limit-zones.conf; + +limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s; +limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s; + diff --git a/nginx.conf b/nginx.conf deleted file mode 100644 index 470e4306..00000000 --- a/nginx.conf +++ /dev/null @@ -1,218 +0,0 @@ -# Production Nginx Configuration for GNX Soft (Docker) -# This configuration is for nginx running on the host machine -# It proxies to Docker containers: backend (1086) and frontend (1087) -# -# IMPORTANT PORT CONFIGURATION: -# - Backend (Django): Only accessible on port 1086 (internal) -# - Frontend (Next.js): Only accessible on port 1087 (internal) -# - Nginx: Public access on ports 80 (HTTP) and 443 (HTTPS) -# - Ports 1086 and 1087 should be blocked from external access by firewall - -# Frontend - Next.js running in Docker on port 1087 -# All frontend requests (/) are proxied here -upstream frontend { - server 127.0.0.1:1087; - keepalive 64; -} - -# Backend - Django running in Docker on port 1086 -# All API requests (/api/) and admin (/admin/) are proxied here -upstream backend_internal { - server 127.0.0.1:1086; - keepalive 64; -} - -# Redirect HTTP to HTTPS -server { - listen 80; - listen [::]:80; - server_name gnxsoft.com www.gnxsoft.com; - - # Let's Encrypt validation - location /.well-known/acme-challenge/ { - root /var/www/certbot; - } - - # Redirect all other traffic to 
HTTPS - location / { - return 301 https://$server_name$request_uri; - } -} - -# HTTPS Server -server { - listen 443 ssl http2; - listen [::]:443 ssl http2; - server_name gnxsoft.com www.gnxsoft.com; - - # SSL Configuration - ssl_certificate /etc/letsencrypt/live/gnxsoft.com/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/gnxsoft.com/privkey.pem; - ssl_protocols TLSv1.2 TLSv1.3; - ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384; - ssl_prefer_server_ciphers on; - ssl_session_cache shared:SSL:10m; - ssl_session_timeout 10m; - - # Security Headers - add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; - add_header X-Frame-Options "SAMEORIGIN" always; - add_header X-Content-Type-Options "nosniff" always; - add_header X-XSS-Protection "1; mode=block" always; - add_header Referrer-Policy "strict-origin-when-cross-origin" always; - add_header Permissions-Policy "camera=(), microphone=(), geolocation=(), interest-cohort=()" always; - - # Rate Limiting Zones - limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s; - limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s; - limit_req_status 429; - - # Client settings - client_max_body_size 10M; - client_body_timeout 30s; - client_header_timeout 30s; - - # Logging - access_log /var/log/nginx/gnxsoft_access.log; - error_log /var/log/nginx/gnxsoft_error.log warn; - - # Root location - Frontend (Next.js on port 1087) - location / { - limit_req zone=general_limit burst=50 nodelay; - - proxy_pass http://frontend; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_cache_bypass $http_upgrade; - - # Timeouts - 
proxy_connect_timeout 60s; - proxy_send_timeout 60s; - proxy_read_timeout 60s; - } - - # API Proxy - Frontend talks to backend through this proxy - # Backend runs in Docker on port 1086 (internal only) - location /api/ { - limit_req zone=api_limit burst=20 nodelay; - - # Internal proxy to backend Docker container (127.0.0.1:1086) - proxy_pass http://backend_internal/api/; - proxy_http_version 1.1; - - # Backend sees request as coming from nginx - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Host $host; - proxy_set_header X-Forwarded-Port $server_port; - - # Add internal API key (will be replaced by docker-start.sh) - set $api_key "PLACEHOLDER_INTERNAL_API_KEY"; - proxy_set_header X-Internal-API-Key $api_key; - - # Hide backend server info - proxy_hide_header X-Powered-By; - proxy_hide_header Server; - - # Timeouts - proxy_connect_timeout 30s; - proxy_send_timeout 30s; - proxy_read_timeout 30s; - - # CORS headers (if needed) - add_header Access-Control-Allow-Origin "https://gnxsoft.com" always; - add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always; - add_header Access-Control-Allow-Headers "Authorization, Content-Type, X-Internal-API-Key" always; - add_header Access-Control-Allow-Credentials "true" always; - - # Handle preflight requests - if ($request_method = 'OPTIONS') { - return 204; - } - } - - # Media files - Served from Docker volume - location /media/ { - alias /home/gnx/Desktop/GNX-WEB/backEnd/media/; - expires 30d; - add_header Cache-Control "public, immutable"; - access_log off; - - # Security - deny execution of scripts - location ~ \.(php|py|pl|sh)$ { - deny all; - } - } - - # Static files - Served from Docker volume - location /static/ { - alias /home/gnx/Desktop/GNX-WEB/backEnd/staticfiles/; - expires 1y; - add_header Cache-Control "public, immutable"; - 
access_log off; - } - - # Next.js static files - location /_next/static/ { - proxy_pass http://frontend; - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; - } - - # Admin panel - Proxy to backend (with IP restriction) - location /admin/ { - # IP restriction is handled by Django middleware - # Add internal API key (will be replaced by docker-start.sh) - set $api_key "PLACEHOLDER_INTERNAL_API_KEY"; - proxy_set_header X-Internal-API-Key $api_key; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_pass http://backend_internal; - proxy_redirect off; - proxy_http_version 1.1; - proxy_set_header Connection ""; - - # Timeouts - proxy_connect_timeout 30s; - proxy_send_timeout 30s; - proxy_read_timeout 30s; - } - - # Deny access to hidden files - location ~ /\. { - deny all; - access_log off; - log_not_found off; - } - - # Health check endpoint - location /health { - access_log off; - return 200 "OK\n"; - add_header Content-Type text/plain; - } -} - -# ============================================================================== -# IMPORTANT NOTES: -# ============================================================================== -# 1. Backend runs in Docker on port 1086 (internal only) -# 2. Frontend runs in Docker on port 1087 -# 3. Nginx runs on host and proxies to Docker containers -# 4. Firewall should BLOCK external access to ports 1086 and 1087 -# 5. Only nginx (ports 80, 443) should be accessible from internet -# 6. Set INTERNAL_API_KEY environment variable in nginx config or systemd service -# 7. 
Update media/static paths to match your actual deployment location -# ============================================================================== - diff --git a/restart-services.sh b/restart-services.sh new file mode 100755 index 00000000..363738de --- /dev/null +++ b/restart-services.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# GNX-WEB Service Restart Script + +# Colors for output +GREEN='\033[0;32m' +NC='\033[0m' # No Color + +echo -e "${GREEN}Restarting GNX-WEB services...${NC}" + +# Restart PM2 services +pm2 restart gnxsoft-backend 2>/dev/null || echo "Backend service not found" +pm2 restart gnxsoft-frontend 2>/dev/null || echo "Frontend service not found" + +echo -e "${GREEN}Services restarted${NC}" + diff --git a/setup.sh b/setup.sh deleted file mode 100755 index ba5f65bd..00000000 --- a/setup.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash -# Initial setup script - Run this once after extracting the zip file - -set -e - -echo "๐Ÿ”ง GNX Web Application - Initial Setup" -echo "======================================" -echo "" - -# Set all necessary permissions -echo "๐Ÿ“‹ Setting up file permissions..." - -# Make all scripts executable -find . -name "*.sh" -type f -exec chmod +x {} \; 2>/dev/null || true - -# Set directory permissions -mkdir -p backEnd/media backEnd/staticfiles backEnd/logs backups -chmod 755 backEnd/media backEnd/staticfiles backEnd/logs backups 2>/dev/null || true - -# Set file permissions -if [ -f "backEnd/db.sqlite3" ]; then - chmod 644 backEnd/db.sqlite3 2>/dev/null || true -fi - -if [ -f ".env.production" ]; then - chmod 600 .env.production 2>/dev/null || true -fi - -# Ensure docker-start.sh is executable -chmod +x docker-start.sh 2>/dev/null || true - -echo "โœ… Permissions configured" -echo "" - -# Check for required files -echo "๐Ÿ“‹ Checking required files..." 
- -REQUIRED_FILES=( - "docker-compose.yml" - "nginx.conf" - ".env.production" - "backEnd/Dockerfile" - "frontEnd/Dockerfile" -) - -MISSING_FILES=() -for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "$file" ]; then - MISSING_FILES+=("$file") - fi -done - -if [ ${#MISSING_FILES[@]} -gt 0 ]; then - echo "โŒ Missing required files:" - for file in "${MISSING_FILES[@]}"; do - echo " - $file" - done - exit 1 -fi - -echo "โœ… All required files present" -echo "" - -# Check Docker -if ! command -v docker &> /dev/null; then - echo "โŒ Docker is not installed. Please install Docker first." - exit 1 -fi - -if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then - echo "โŒ Docker Compose is not installed. Please install Docker Compose first." - exit 1 -fi - -echo "โœ… Docker is installed" -echo "" - -echo "โœ… Setup complete!" -echo "" -echo "๐Ÿ“‹ Next steps:" -echo " 1. Review and update .env.production with your settings" -echo " 2. Run: ./docker-start.sh" -echo "" - diff --git a/start-services.sh b/start-services.sh new file mode 100755 index 00000000..6554cb1c --- /dev/null +++ b/start-services.sh @@ -0,0 +1,258 @@ +#!/bin/bash + +# GNX-WEB Service Startup Script +# Starts backend on port 1086 and frontend on port 1087 + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Get script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +BACKEND_DIR="$SCRIPT_DIR/backEnd" +FRONTEND_DIR="$SCRIPT_DIR/frontEnd" + +# Ports +BACKEND_PORT=1086 +FRONTEND_PORT=1087 + +echo -e "${BLUE}==========================================" +echo "GNX-WEB Service Startup" +echo "==========================================${NC}" + +# Check if PM2 is installed +if ! command -v pm2 &> /dev/null; then + echo -e "${YELLOW}PM2 is not installed. 
Installing PM2...${NC}" + npm install -g pm2 +fi + +# Function to check if port is in use +check_port() { + local port=$1 + if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1 || netstat -tlnp 2>/dev/null | grep -q ":$port " || ss -tlnp 2>/dev/null | grep -q ":$port "; then + return 0 + else + return 1 + fi +} + +# Check if ports are available +if check_port $BACKEND_PORT; then + echo -e "${YELLOW}Port $BACKEND_PORT is already in use. Stopping existing service...${NC}" + pm2 delete gnxsoft-backend 2>/dev/null || true + sleep 2 +fi + +if check_port $FRONTEND_PORT; then + echo -e "${YELLOW}Port $FRONTEND_PORT is already in use. Stopping existing service...${NC}" + pm2 delete gnxsoft-frontend 2>/dev/null || true + sleep 2 +fi + +# Check if backend directory exists +if [ ! -d "$BACKEND_DIR" ]; then + echo -e "${RED}Error: Backend directory not found at $BACKEND_DIR${NC}" + exit 1 +fi + +# Check if frontend directory exists +if [ ! -d "$FRONTEND_DIR" ]; then + echo -e "${RED}Error: Frontend directory not found at $FRONTEND_DIR${NC}" + exit 1 +fi + +# Function to generate secure random key +generate_secret_key() { + python3 -c "import secrets; print(secrets.token_urlsafe($1))" 2>/dev/null || \ + openssl rand -base64 $((($1 * 3) / 4)) | tr -d '\n' | head -c $1 +} + +# Check if backend .env exists +if [ ! -f "$BACKEND_DIR/.env" ]; then + echo -e "${YELLOW}Warning: Backend .env file not found. 
Creating from example...${NC}" + if [ -f "$BACKEND_DIR/production.env.example" ]; then + cp "$BACKEND_DIR/production.env.example" "$BACKEND_DIR/.env" + + # Generate and update keys automatically + echo -e "${BLUE}Generating secure keys...${NC}" + SECRET_KEY=$(generate_secret_key 50) + INTERNAL_API_KEY=$(generate_secret_key 32) + + # Update keys in .env file + sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$SECRET_KEY|" "$BACKEND_DIR/.env" + sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" "$BACKEND_DIR/.env" + sed -i "s|^STATIC_ROOT=.*|STATIC_ROOT=$BACKEND_DIR/staticfiles|" "$BACKEND_DIR/.env" + sed -i "s|^MEDIA_ROOT=.*|MEDIA_ROOT=$BACKEND_DIR/media|" "$BACKEND_DIR/.env" + + echo -e "${GREEN}โœ“ Generated and updated SECRET_KEY and INTERNAL_API_KEY${NC}" + echo -e "${YELLOW}Please update other values in $BACKEND_DIR/.env${NC}" + else + echo -e "${RED}Error: production.env.example not found${NC}" + exit 1 + fi +else + # Check if keys need to be generated + if grep -q "your-super-secret\|your-secure-api-key\|PLACEHOLDER" "$BACKEND_DIR/.env"; then + echo -e "${BLUE}Generating secure keys for existing .env file...${NC}" + SECRET_KEY=$(generate_secret_key 50) + INTERNAL_API_KEY=$(generate_secret_key 32) + + # Update keys in .env file + sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$SECRET_KEY|" "$BACKEND_DIR/.env" + sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" "$BACKEND_DIR/.env" + + echo -e "${GREEN}โœ“ Updated SECRET_KEY and INTERNAL_API_KEY${NC}" + + # Update nginx config if it exists + if [ -f "/etc/nginx/sites-available/gnxsoft" ]; then + echo -e "${BLUE}Updating nginx configuration with INTERNAL_API_KEY...${NC}" + escaped_key=$(echo "$INTERNAL_API_KEY" | sed 's/[[\.*^$()+?{|]/\\&/g') + sudo sed -i "s|set \$api_key \".*\";|set \$api_key \"$escaped_key\";|g" /etc/nginx/sites-available/gnxsoft + echo -e "${GREEN}โœ“ Updated nginx config with INTERNAL_API_KEY${NC}" + fi + fi +fi + +# Start Backend +echo -e "${GREEN}[1/2] Starting Backend on port 
$BACKEND_PORT...${NC}" +cd "$BACKEND_DIR" + +# Check if virtual environment exists +if [ ! -d "venv" ]; then + echo -e "${YELLOW}Virtual environment not found. Creating...${NC}" + python3 -m venv venv +fi + +# Activate virtual environment +source venv/bin/activate + +# Install/update dependencies +if [ ! -f "venv/.deps_installed" ]; then + echo -e "${BLUE}Installing Python dependencies...${NC}" + pip install -r requirements.txt + touch venv/.deps_installed +fi + +# Run migrations +echo -e "${BLUE}Running database migrations...${NC}" +python manage.py migrate --noinput + +# Collect static files +echo -e "${BLUE}Collecting static files...${NC}" +python manage.py collectstatic --noinput + +# Create logs directory +mkdir -p logs + +# Start backend with PM2 +pm2 start gunicorn \ + --name "gnxsoft-backend" \ + --interpreter "$BACKEND_DIR/venv/bin/python" \ + -- \ + gnx.wsgi:application \ + --bind 127.0.0.1:$BACKEND_PORT \ + --workers 3 \ + --timeout 120 \ + --access-logfile "$BACKEND_DIR/logs/gunicorn_access.log" \ + --error-logfile "$BACKEND_DIR/logs/gunicorn_error.log" + +# Start Frontend +echo -e "${GREEN}[2/2] Starting Frontend on port $FRONTEND_PORT...${NC}" +cd "$FRONTEND_DIR" + +# Check if node_modules exists +if [ ! -d "node_modules" ]; then + echo -e "${YELLOW}Node modules not found. Installing...${NC}" + npm install +fi + +# Check if .next exists (build directory) +if [ ! -d ".next" ]; then + echo -e "${YELLOW}Frontend not built. Building...${NC}" + # Use production environment for build + NODE_ENV=production PORT=$FRONTEND_PORT npm run build +fi + +# Create .env.production if it doesn't exist +if [ ! -f ".env.production" ]; then + echo -e "${BLUE}Creating .env.production file...${NC}" + cat > .env.production << EOF +NEXT_PUBLIC_SITE_URL=https://gnxsoft.com +NEXT_PUBLIC_API_URL= +PORT=$FRONTEND_PORT +NODE_ENV=production +NEXT_TELEMETRY_DISABLED=1 +EOF + echo -e "${GREEN}โœ“ Created .env.production${NC}" +else + # Update PORT if it exists but is different + if ! 
grep -q "^PORT=$FRONTEND_PORT" .env.production; then + echo -e "${BLUE}Updating PORT in .env.production...${NC}" + if grep -q "^PORT=" .env.production; then + sed -i "s|^PORT=.*|PORT=$FRONTEND_PORT|" .env.production + else + echo "PORT=$FRONTEND_PORT" >> .env.production + fi + echo -e "${GREEN}โœ“ Updated PORT in .env.production${NC}" + fi + + # Ensure NODE_ENV is set to production + if ! grep -q "^NODE_ENV=production" .env.production; then + if grep -q "^NODE_ENV=" .env.production; then + sed -i "s|^NODE_ENV=.*|NODE_ENV=production|" .env.production + else + echo "NODE_ENV=production" >> .env.production + fi + fi +fi + +# Check if Next.js is using standalone output mode +if grep -q '"output":\s*"standalone"' next.config.js 2>/dev/null || grep -q "output:.*'standalone'" next.config.js 2>/dev/null; then + echo -e "${BLUE}Detected standalone mode. Starting with standalone server...${NC}" + + # Check if standalone server exists + if [ ! -f ".next/standalone/server.js" ]; then + echo -e "${YELLOW}Standalone server not found. Rebuilding...${NC}" + NODE_ENV=production PORT=$FRONTEND_PORT npm run build + fi + + # Start standalone server with PM2 + PORT=$FRONTEND_PORT NODE_ENV=production pm2 start node \ + --name "gnxsoft-frontend" \ + --cwd "$FRONTEND_DIR" \ + -- \ + ".next/standalone/server.js" +else + # Standard Next.js start + PORT=$FRONTEND_PORT NODE_ENV=production pm2 start npm \ + --name "gnxsoft-frontend" \ + -- start +fi + +# Save PM2 configuration +pm2 save + +echo "" +echo -e "${GREEN}==========================================" +echo "Services Started Successfully!" 
+echo "==========================================${NC}" +echo "" +echo -e "${BLUE}Backend:${NC} http://127.0.0.1:$BACKEND_PORT" +echo -e "${BLUE}Frontend:${NC} http://127.0.0.1:$FRONTEND_PORT" +echo "" +echo "PM2 Commands:" +echo " pm2 status - Check service status" +echo " pm2 logs gnxsoft-backend - View backend logs" +echo " pm2 logs gnxsoft-frontend - View frontend logs" +echo " pm2 restart all - Restart all services" +echo " pm2 stop all - Stop all services" +echo " pm2 delete all - Remove all services" +echo "" +echo -e "${YELLOW}Note: Make sure to configure nginx to proxy to these ports${NC}" +echo "" + diff --git a/stop-services.sh b/stop-services.sh new file mode 100755 index 00000000..eb60bf61 --- /dev/null +++ b/stop-services.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# GNX-WEB Service Stop Script + +# Colors for output +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}Stopping GNX-WEB services...${NC}" + +# Stop PM2 services +pm2 stop gnxsoft-backend 2>/dev/null || echo -e "${YELLOW}Backend service not running${NC}" +pm2 stop gnxsoft-frontend 2>/dev/null || echo -e "${YELLOW}Frontend service not running${NC}" + +echo -e "${GREEN}Services stopped${NC}" + diff --git a/switch-to-sqlite.sh b/switch-to-sqlite.sh new file mode 100644 index 00000000..7da778da --- /dev/null +++ b/switch-to-sqlite.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Switch Django backend to use SQLite instead of PostgreSQL + +BACKEND_DIR="/var/www/GNX-WEB/backEnd" +BACKEND_ENV="$BACKEND_DIR/.env" + +echo "Switching to SQLite database..." + +if [ -f "$BACKEND_ENV" ]; then + # Comment out or remove DATABASE_URL line + if grep -q "^DATABASE_URL=" "$BACKEND_ENV"; then + echo "Commenting out DATABASE_URL to use SQLite..." 
+ sed -i 's|^DATABASE_URL=.*|# DATABASE_URL= # Using SQLite instead|' "$BACKEND_ENV" + echo "โœ“ DATABASE_URL commented out" + fi + + # Ensure db.sqlite3 path is correct (it should be in backEnd directory) + echo "" + echo "SQLite database will be at: $BACKEND_DIR/db.sqlite3" + echo "" + echo "Restarting backend to apply changes..." + pm2 restart gnxsoft-backend + echo "โœ“ Backend restarted" + echo "" + echo "Checking database connection..." + cd "$BACKEND_DIR" + source venv/bin/activate + python manage.py check --database default +else + echo "Error: .env file not found at $BACKEND_ENV" + exit 1 +fi + diff --git a/systemd/gnxsoft-backend.service b/systemd/gnxsoft-backend.service new file mode 100644 index 00000000..18e87091 --- /dev/null +++ b/systemd/gnxsoft-backend.service @@ -0,0 +1,27 @@ +[Unit] +Description=GNX-WEB Django Backend (Gunicorn) +After=network.target postgresql.service +Requires=postgresql.service + +[Service] +Type=notify +User=gnx +Group=gnx +WorkingDirectory=/home/gnx/Desktop/GNX-WEB/backEnd +Environment="PATH=/home/gnx/Desktop/GNX-WEB/backEnd/venv/bin" +EnvironmentFile=/home/gnx/Desktop/GNX-WEB/backEnd/.env +ExecStart=/home/gnx/Desktop/GNX-WEB/backEnd/venv/bin/gunicorn \ + --bind 127.0.0.1:1086 \ + --workers 3 \ + --timeout 120 \ + --access-logfile /home/gnx/Desktop/GNX-WEB/backEnd/logs/gunicorn_access.log \ + --error-logfile /home/gnx/Desktop/GNX-WEB/backEnd/logs/gunicorn_error.log \ + --log-level info \ + gnx.wsgi:application +ExecReload=/bin/kill -s HUP $MAINPID +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target + diff --git a/systemd/gnxsoft-frontend.service b/systemd/gnxsoft-frontend.service new file mode 100644 index 00000000..ea0d246c --- /dev/null +++ b/systemd/gnxsoft-frontend.service @@ -0,0 +1,22 @@ +[Unit] +Description=GNX-WEB Next.js Frontend +After=network.target + +[Service] +Type=simple +User=gnx +Group=gnx +WorkingDirectory=/home/gnx/Desktop/GNX-WEB/frontEnd +Environment="NODE_ENV=production" 
+Environment="PORT=1087" +Environment="NEXT_TELEMETRY_DISABLED=1" +EnvironmentFile=/home/gnx/Desktop/GNX-WEB/frontEnd/.env.production +ExecStart=/usr/bin/npm start +Restart=always +RestartSec=10 +StandardOutput=journal +StandardError=journal + +[Install] +WantedBy=multi-user.target + diff --git a/update-keys.sh b/update-keys.sh new file mode 100755 index 00000000..9c216967 --- /dev/null +++ b/update-keys.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# GNX-WEB Key Update Script +# Regenerates and updates SECRET_KEY and INTERNAL_API_KEY + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Get script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +BACKEND_DIR="$SCRIPT_DIR/backEnd" + +# Function to generate secure random key +generate_secret_key() { + python3 -c "import secrets; print(secrets.token_urlsafe($1))" 2>/dev/null || \ + openssl rand -base64 $((($1 * 3) / 4)) | tr -d '\n' | head -c $1 +} + +echo -e "${BLUE}==========================================" +echo "GNX-WEB Key Update Script" +echo "==========================================${NC}" +echo "" + +# Check if .env file exists +if [ ! 
-f "$BACKEND_DIR/.env" ]; then + echo -e "${RED}Error: .env file not found at $BACKEND_DIR/.env${NC}" + echo -e "${YELLOW}Please run deploy.sh first or create .env manually${NC}" + exit 1 +fi + +# Generate new keys +echo -e "${BLUE}Generating new secure keys...${NC}" +SECRET_KEY=$(generate_secret_key 50) +INTERNAL_API_KEY=$(generate_secret_key 32) + +# Update .env file +echo -e "${BLUE}Updating .env file...${NC}" +sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$SECRET_KEY|" "$BACKEND_DIR/.env" +sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" "$BACKEND_DIR/.env" +echo -e "${GREEN}โœ“ Updated SECRET_KEY${NC}" +echo -e "${GREEN}โœ“ Updated INTERNAL_API_KEY${NC}" + +# Update nginx config if it exists +if [ -f "/etc/nginx/sites-available/gnxsoft" ]; then + echo -e "${BLUE}Updating nginx configuration...${NC}" + escaped_key=$(echo "$INTERNAL_API_KEY" | sed 's/[[\.*^$()+?{|]/\\&/g') + sudo sed -i "s|set \$api_key \".*\";|set \$api_key \"$escaped_key\";|g" /etc/nginx/sites-available/gnxsoft + echo -e "${GREEN}โœ“ Updated nginx config with INTERNAL_API_KEY${NC}" + + # Test nginx configuration + if sudo nginx -t >/dev/null 2>&1; then + echo -e "${GREEN}โœ“ Nginx configuration is valid${NC}" + echo -e "${YELLOW}Reload nginx with: sudo systemctl reload nginx${NC}" + else + echo -e "${RED}โœ— Nginx configuration has errors${NC}" + echo -e "${YELLOW}Please check manually: sudo nginx -t${NC}" + fi +else + echo -e "${YELLOW}โš  Nginx config not found. Update manually if needed.${NC}" +fi + +echo "" +echo -e "${GREEN}==========================================" +echo "Keys Updated Successfully!" 
+echo "==========================================${NC}" +echo "" +echo -e "${BLUE}New Keys:${NC}" +echo -e "${GREEN}SECRET_KEY: ${SECRET_KEY:0:30}...${NC}" +echo -e "${GREEN}INTERNAL_API_KEY: ${INTERNAL_API_KEY:0:30}...${NC}" +echo "" +echo -e "${YELLOW}Note: You may need to restart services for changes to take effect${NC}" +echo -e "${YELLOW}Run: ./restart-services.sh${NC}" +echo "" + diff --git a/verify-deployment.sh b/verify-deployment.sh new file mode 100755 index 00000000..c17735ca --- /dev/null +++ b/verify-deployment.sh @@ -0,0 +1,283 @@ +#!/bin/bash + +# GNX-WEB Deployment Verification Script +# Checks if all components are properly configured and running + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo -e "${BLUE}==========================================" +echo "GNX-WEB Deployment Verification" +echo "==========================================${NC}" +echo "" + +ERRORS=0 +WARNINGS=0 + +# Function to check if command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +# Function to check if port is listening +port_listening() { + local port=$1 + if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1 || netstat -tlnp 2>/dev/null | grep -q ":$port " || ss -tlnp 2>/dev/null | grep -q ":$port "; then + return 0 + else + return 1 + fi +} + +# Check required commands +echo -e "${BLUE}Checking required commands...${NC}" +for cmd in python3 node npm nginx psql; do + if command_exists $cmd; then + echo -e "${GREEN}โœ“${NC} $cmd is installed" + else + echo -e "${RED}โœ—${NC} $cmd is NOT installed" + ((ERRORS++)) + fi +done + +# Check PM2 +if command_exists pm2; then + echo -e "${GREEN}โœ“${NC} PM2 is installed" +else + echo -e "${YELLOW}โš ${NC} PM2 is not installed (recommended for process management)" + ((WARNINGS++)) +fi + +echo "" + +# Check backend +echo -e "${BLUE}Checking Backend...${NC}" +if [ -f "backEnd/.env" ]; then + echo -e "${GREEN}โœ“${NC} Backend .env file 
exists" + + # Check for critical variables + if grep -q "SECRET_KEY=" backEnd/.env && ! grep -q "your-super-secret" backEnd/.env; then + echo -e "${GREEN}โœ“${NC} SECRET_KEY is set" + else + echo -e "${RED}โœ—${NC} SECRET_KEY not properly configured" + ((ERRORS++)) + fi + + if grep -q "INTERNAL_API_KEY=" backEnd/.env && ! grep -q "PLACEHOLDER\|your-secure-api-key" backEnd/.env; then + echo -e "${GREEN}โœ“${NC} INTERNAL_API_KEY is set" + else + echo -e "${RED}โœ—${NC} INTERNAL_API_KEY not properly configured" + ((ERRORS++)) + fi + + if grep -q "DATABASE_URL=" backEnd/.env && ! grep -q "your_password_here" backEnd/.env; then + echo -e "${GREEN}โœ“${NC} DATABASE_URL is configured" + else + echo -e "${YELLOW}โš ${NC} DATABASE_URL may not be configured" + ((WARNINGS++)) + fi +else + echo -e "${RED}โœ—${NC} Backend .env file not found" + ((ERRORS++)) +fi + +if [ -d "backEnd/venv" ]; then + echo -e "${GREEN}โœ“${NC} Backend virtual environment exists" +else + echo -e "${YELLOW}โš ${NC} Backend virtual environment not found" + ((WARNINGS++)) +fi + +if port_listening 1086; then + echo -e "${GREEN}โœ“${NC} Backend is running on port 1086" +else + echo -e "${YELLOW}โš ${NC} Backend is not running on port 1086" + ((WARNINGS++)) +fi + +echo "" + +# Check frontend +echo -e "${BLUE}Checking Frontend...${NC}" +if [ -f "frontEnd/.env.production" ]; then + echo -e "${GREEN}โœ“${NC} Frontend .env.production exists" +else + echo -e "${YELLOW}โš ${NC} Frontend .env.production not found" + ((WARNINGS++)) +fi + +if [ -d "frontEnd/node_modules" ]; then + echo -e "${GREEN}โœ“${NC} Frontend node_modules exists" +else + echo -e "${YELLOW}โš ${NC} Frontend node_modules not found (run npm install)" + ((WARNINGS++)) +fi + +if [ -d "frontEnd/.next" ]; then + echo -e "${GREEN}โœ“${NC} Frontend build exists" +else + echo -e "${YELLOW}โš ${NC} Frontend not built (run npm run build)" + ((WARNINGS++)) +fi + +if port_listening 1087; then + echo -e "${GREEN}โœ“${NC} Frontend is running on port 1087" 
+else + echo -e "${YELLOW}โš ${NC} Frontend is not running on port 1087" + ((WARNINGS++)) +fi + +echo "" + +# Check database +echo -e "${BLUE}Checking Database...${NC}" +if port_listening 5433; then + echo -e "${GREEN}โœ“${NC} PostgreSQL is running on port 5433" +else + echo -e "${YELLOW}โš ${NC} PostgreSQL is not running on port 5433" + ((WARNINGS++)) +fi + +if command_exists psql; then + DB_URL=$(grep "^DATABASE_URL=" backEnd/.env 2>/dev/null | cut -d'=' -f2-) + if [ -n "$DB_URL" ] && [[ "$DB_URL" == postgresql://* ]]; then + # Extract components from postgresql://user:password@host:port/database + DB_USER=$(echo "$DB_URL" | sed -n 's|.*://\([^:]*\):.*|\1|p') + DB_PASS=$(echo "$DB_URL" | sed -n 's|.*://[^:]*:\([^@]*\)@.*|\1|p') + DB_HOST=$(echo "$DB_URL" | sed -n 's|.*@\([^:]*\):.*|\1|p') + DB_PORT=$(echo "$DB_URL" | sed -n 's|.*:\([0-9]*\)/.*|\1|p') + DB_NAME=$(echo "$DB_URL" | sed -n 's|.*/\([^?]*\).*|\1|p') + + if [ -n "$DB_USER" ] && [ -n "$DB_PASS" ] && [ -n "$DB_NAME" ]; then + if PGPASSWORD="$DB_PASS" psql -h "${DB_HOST:-localhost}" -p "${DB_PORT:-5433}" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1;" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} Database connection successful" + else + echo -e "${YELLOW}โš ${NC} Could not verify database connection (check credentials)" + ((WARNINGS++)) + fi + else + echo -e "${YELLOW}โš ${NC} Could not parse DATABASE_URL for connection test" + ((WARNINGS++)) + fi + else + echo -e "${YELLOW}โš ${NC} DATABASE_URL not found or invalid format" + ((WARNINGS++)) + fi +fi + +echo "" + +# Check nginx +echo -e "${BLUE}Checking Nginx...${NC}" +if [ -f "/etc/nginx/sites-available/gnxsoft" ]; then + echo -e "${GREEN}โœ“${NC} Nginx configuration exists" + + if [ -L "/etc/nginx/sites-enabled/gnxsoft" ]; then + echo -e "${GREEN}โœ“${NC} Nginx site is enabled" + else + echo -e "${YELLOW}โš ${NC} Nginx site is not enabled" + ((WARNINGS++)) + fi + + if grep -q "PLACEHOLDER_INTERNAL_API_KEY" /etc/nginx/sites-available/gnxsoft; then + echo -e 
"${RED}โœ—${NC} Nginx config still has PLACEHOLDER_INTERNAL_API_KEY" + ((ERRORS++)) + else + echo -e "${GREEN}โœ“${NC} Nginx INTERNAL_API_KEY is configured" + fi +else + echo -e "${YELLOW}โš ${NC} Nginx configuration not found" + ((WARNINGS++)) +fi + +if systemctl is-active --quiet nginx 2>/dev/null; then + echo -e "${GREEN}โœ“${NC} Nginx is running" +else + echo -e "${YELLOW}โš ${NC} Nginx is not running" + ((WARNINGS++)) +fi + +if port_listening 80; then + echo -e "${GREEN}โœ“${NC} HTTP port 80 is listening" +else + echo -e "${YELLOW}โš ${NC} HTTP port 80 is not listening" + ((WARNINGS++)) +fi + +if port_listening 443; then + echo -e "${GREEN}โœ“${NC} HTTPS port 443 is listening" +else + echo -e "${YELLOW}โš ${NC} HTTPS port 443 is not listening" + ((WARNINGS++)) +fi + +echo "" + +# Check firewall +echo -e "${BLUE}Checking Firewall...${NC}" +if command_exists ufw; then + if ufw status | grep -q "Status: active"; then + echo -e "${GREEN}โœ“${NC} UFW firewall is active" + else + echo -e "${YELLOW}โš ${NC} UFW firewall is not active" + ((WARNINGS++)) + fi +else + echo -e "${YELLOW}โš ${NC} UFW not found (firewall may be managed differently)" + ((WARNINGS++)) +fi + +echo "" + +# Check PM2 services +if command_exists pm2; then + echo -e "${BLUE}Checking PM2 Services...${NC}" + if pm2 list | grep -q "gnxsoft-backend"; then + if pm2 list | grep -q "gnxsoft-backend.*online"; then + echo -e "${GREEN}โœ“${NC} Backend service is running in PM2" + else + echo -e "${YELLOW}โš ${NC} Backend service exists but may not be online" + ((WARNINGS++)) + fi + else + echo -e "${YELLOW}โš ${NC} Backend service not found in PM2" + ((WARNINGS++)) + fi + + if pm2 list | grep -q "gnxsoft-frontend"; then + if pm2 list | grep -q "gnxsoft-frontend.*online"; then + echo -e "${GREEN}โœ“${NC} Frontend service is running in PM2" + else + echo -e "${YELLOW}โš ${NC} Frontend service exists but may not be online" + ((WARNINGS++)) + fi + else + echo -e "${YELLOW}โš ${NC} Frontend service not found in 
PM2" + ((WARNINGS++)) + fi +fi + +echo "" +echo -e "${BLUE}==========================================" +echo "Verification Summary" +echo "==========================================${NC}" + +if [ $ERRORS -eq 0 ] && [ $WARNINGS -eq 0 ]; then + echo -e "${GREEN}โœ“ All checks passed!${NC}" + exit 0 +elif [ $ERRORS -eq 0 ]; then + echo -e "${YELLOW}โš  $WARNINGS warning(s) found${NC}" + echo -e "${GREEN}โœ“ No critical errors${NC}" + exit 0 +else + echo -e "${RED}โœ— $ERRORS error(s) found${NC}" + if [ $WARNINGS -gt 0 ]; then + echo -e "${YELLOW}โš  $WARNINGS warning(s) found${NC}" + fi + exit 1 +fi + From 4c8b71fe0d2e0e7810debacf2530b244b713f951 Mon Sep 17 00:00:00 2001 From: Iliyan Angelov Date: Tue, 25 Nov 2025 09:21:00 +0200 Subject: [PATCH 2/7] updates --- frontEnd/app/career/[slug]/page.tsx | 147 +++++++++++--------------- frontEnd/app/services/[slug]/page.tsx | 95 ++++++++++++----- frontEnd/lib/api/serviceService.ts | 42 +++++--- 3 files changed, 162 insertions(+), 122 deletions(-) diff --git a/frontEnd/app/career/[slug]/page.tsx b/frontEnd/app/career/[slug]/page.tsx index 81c072fe..f46f6e05 100644 --- a/frontEnd/app/career/[slug]/page.tsx +++ b/frontEnd/app/career/[slug]/page.tsx @@ -1,110 +1,89 @@ -"use client"; - -import { useParams } from "next/navigation"; -import { useEffect } from "react"; -import Link from "next/link"; +import { Metadata } from 'next'; +import { notFound } from 'next/navigation'; import Header from "@/components/shared/layout/header/Header"; import JobSingle from "@/components/pages/career/JobSingle"; import Footer from "@/components/shared/layout/footer/Footer"; import CareerScrollProgressButton from "@/components/pages/career/CareerScrollProgressButton"; import CareerInitAnimations from "@/components/pages/career/CareerInitAnimations"; -import { useJob } from "@/lib/hooks/useCareer"; +import { JobPosition } from "@/lib/api/careerService"; import { generateCareerMetadata } from "@/lib/seo/metadata"; +import { API_CONFIG, getApiHeaders 
} from "@/lib/config/api"; -const JobPage = () => { - const params = useParams(); - const slug = params?.slug as string; - const { job, loading, error } = useJob(slug); +interface JobPageProps { + params: Promise<{ + slug: string; + }>; +} - // Update metadata dynamically for client component - useEffect(() => { - if (job) { - const metadata = generateCareerMetadata(job); - const title = typeof metadata.title === 'string' ? metadata.title : `Career - ${job.title} | GNX Soft`; - document.title = title; - - // Update meta description - let metaDescription = document.querySelector('meta[name="description"]'); - if (!metaDescription) { - metaDescription = document.createElement('meta'); - metaDescription.setAttribute('name', 'description'); - document.head.appendChild(metaDescription); +// Generate metadata for each job page +export async function generateMetadata({ params }: JobPageProps): Promise { + const { slug } = await params; + + try { + const response = await fetch( + `${API_CONFIG.BASE_URL}/api/career/jobs/${slug}/`, + { + method: 'GET', + headers: getApiHeaders(), + next: { revalidate: 3600 }, // Revalidate every hour } - const description = typeof metadata.description === 'string' ? metadata.description : `Apply for ${job.title} at GNX Soft. ${job.location || 'Remote'} position.`; - metaDescription.setAttribute('content', description); + ); - // Update canonical URL - let canonical = document.querySelector('link[rel="canonical"]'); - if (!canonical) { - canonical = document.createElement('link'); - canonical.setAttribute('rel', 'canonical'); - document.head.appendChild(canonical); - } - canonical.setAttribute('href', `${window.location.origin}/career/${job.slug}`); + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); } - }, [job]); - if (loading) { + const job = await response.json(); + + return generateCareerMetadata({ + title: job.title, + description: job.short_description || job.about_role, + slug: job.slug, + location: job.location, + department: job.department, + employment_type: job.employment_type, + }); + } catch (error) { + return { + title: 'Job Not Found | GNX Soft', + description: 'The requested job position could not be found.', + }; + } +} + +const JobPage = async ({ params }: JobPageProps) => { + const { slug } = await params; + + try { + const response = await fetch( + `${API_CONFIG.BASE_URL}/api/career/jobs/${slug}/`, + { + method: 'GET', + headers: getApiHeaders(), + next: { revalidate: 3600 }, // Revalidate every hour + } + ); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const job: JobPosition = await response.json(); + return (
-
-
-
-
-

Loading job details...

-
-
-
-
+
); + } catch (error) { + notFound(); } - - if (error || !job) { - return ( -
-
-
-
-
-
-
-

Job Not Found

-

- The job position you are looking for does not exist or is no longer available. -

- - View All Positions - -
-
-
-
-
-
- - -
- ); - } - - return ( -
-
-
- -
-
- - -
- ); }; export default JobPage; diff --git a/frontEnd/app/services/[slug]/page.tsx b/frontEnd/app/services/[slug]/page.tsx index df3168c8..5b5802db 100644 --- a/frontEnd/app/services/[slug]/page.tsx +++ b/frontEnd/app/services/[slug]/page.tsx @@ -9,9 +9,10 @@ import Transform from "@/components/pages/services/Transform"; import Footer from "@/components/shared/layout/footer/Footer"; import ServicesScrollProgressButton from "@/components/pages/services/ServicesScrollProgressButton"; import ServicesInitAnimations from "@/components/pages/services/ServicesInitAnimations"; -import { serviceService, Service } from "@/lib/api/serviceService"; +import { Service } from "@/lib/api/serviceService"; import { generateServiceMetadata } from "@/lib/seo/metadata"; import { ServiceSchema, BreadcrumbSchema } from "@/components/shared/seo/StructuredData"; +import { API_CONFIG } from "@/lib/config/api"; interface ServicePageProps { params: Promise<{ @@ -19,16 +20,30 @@ interface ServicePageProps { }>; } -// Force static generation - pages are pre-rendered at build time -export const dynamic = 'force-static'; -export const dynamicParams = false; // Return 404 for unknown slugs -export const revalidate = false; // Never revalidate - fully static - -// Generate static params for all services (optional - for better performance) +// Generate static params for all services at build time (optional - for better performance) +// This pre-generates known pages, but new pages can still be generated on-demand export async function generateStaticParams() { try { - const services = await serviceService.getServices(); - return services.results.map((service: Service) => ({ + const response = await fetch( + `${API_CONFIG.BASE_URL}/api/services/`, + { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + }, + next: { revalidate: 60 }, // Revalidate every minute for faster image updates + } + ); + + if (!response.ok) { + console.error('Error fetching services for static params:', 
response.status); + return []; + } + + const data = await response.json(); + const services = data.results || data; + + return services.map((service: Service) => ({ slug: service.slug, })); } catch (error) { @@ -39,9 +54,25 @@ export async function generateStaticParams() { // Generate enhanced metadata for each service page export async function generateMetadata({ params }: ServicePageProps) { + const { slug } = await params; + try { - const { slug } = await params; - const service = await serviceService.getServiceBySlug(slug); + const response = await fetch( + `${API_CONFIG.BASE_URL}/api/services/${slug}/`, + { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + }, + next: { revalidate: 60 }, // Revalidate every minute for faster image updates + } + ); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const service = await response.json(); return generateServiceMetadata(service); } catch (error) { @@ -53,23 +84,34 @@ export async function generateMetadata({ params }: ServicePageProps) { } const ServicePage = async ({ params }: ServicePageProps) => { - let service: Service; + const { slug } = await params; try { - const { slug } = await params; - service = await serviceService.getServiceBySlug(slug); - } catch (error) { - notFound(); - } + const response = await fetch( + `${API_CONFIG.BASE_URL}/api/services/${slug}/`, + { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + }, + next: { revalidate: 60 }, // Revalidate every minute for faster image updates + } + ); - // Breadcrumb data for structured data - const breadcrumbItems = [ - { name: 'Home', url: '/' }, - { name: 'Services', url: '/services' }, - { name: service.title, url: `/services/${service.slug}` }, - ]; + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } - return ( + const service: Service = await response.json(); + + // Breadcrumb data for structured data + const breadcrumbItems = [ + { name: 'Home', url: '/' }, + { name: 'Services', url: '/services' }, + { name: service.title, url: `/services/${service.slug}` }, + ]; + + return (
{/* SEO Structured Data */} @@ -88,7 +130,10 @@ const ServicePage = async ({ params }: ServicePageProps) => {
- ); + ); + } catch (error) { + notFound(); + } }; export default ServicePage; diff --git a/frontEnd/lib/api/serviceService.ts b/frontEnd/lib/api/serviceService.ts index 5bf19f5e..36f97538 100644 --- a/frontEnd/lib/api/serviceService.ts +++ b/frontEnd/lib/api/serviceService.ts @@ -427,32 +427,48 @@ export const serviceUtils = { }).format(numPrice); }, - // Get service image URL + // Get service image URL with cache-busting // Use relative URLs for same-domain images (Next.js can optimize via rewrites) // Use absolute URLs only for external images + // Adds updated_at timestamp as query parameter for cache-busting when images change getServiceImageUrl: (service: Service): string => { + let imageUrl: string = ''; + // If service has an uploaded image if (service.image && typeof service.image === 'string' && service.image.startsWith('/media/')) { - // Use relative URL - Next.js rewrite will handle fetching from backend during optimization - return service.image; + imageUrl = service.image; } - // If service has an image_url - if (service.image_url) { + else if (service.image_url) { if (service.image_url.startsWith('http')) { // External URL - keep as absolute - return service.image_url; - } - if (service.image_url.startsWith('/media/')) { + imageUrl = service.image_url; + } else if (service.image_url.startsWith('/media/')) { // Same domain media - use relative URL - return service.image_url; + imageUrl = service.image_url; + } else { + // Other relative URLs + imageUrl = service.image_url; } - // Other relative URLs - return service.image_url; + } else { + // Fallback to default image (relative is fine for public images) + imageUrl = '/images/service/default.png'; } - // Fallback to default image (relative is fine for public images) - return '/images/service/default.png'; + // Add cache-busting query parameter using updated_at timestamp + // This ensures images refresh when service is updated + if (service.updated_at && imageUrl && !imageUrl.includes('?')) { + try { + 
const timestamp = new Date(service.updated_at).getTime(); + const separator = imageUrl.includes('?') ? '&' : '?'; + imageUrl = `${imageUrl}${separator}v=${timestamp}`; + } catch (error) { + // If date parsing fails, just return the URL without cache-busting + console.warn('Failed to parse updated_at for cache-busting:', error); + } + } + + return imageUrl; }, // Generate service slug from title From 8823edc8b393790834529d2a203de1d23fccaa51 Mon Sep 17 00:00:00 2001 From: Iliyan Angelov Date: Tue, 25 Nov 2025 11:16:05 +0200 Subject: [PATCH 3/7] updates --- frontEnd/app/services/[slug]/page.tsx | 26 +++++++++++++------------- nginx-gnxsoft.conf | 16 +++++++++++++--- start-services.sh | 13 +++++++++++++ 3 files changed, 39 insertions(+), 16 deletions(-) diff --git a/frontEnd/app/services/[slug]/page.tsx b/frontEnd/app/services/[slug]/page.tsx index 5b5802db..5a4ca70c 100644 --- a/frontEnd/app/services/[slug]/page.tsx +++ b/frontEnd/app/services/[slug]/page.tsx @@ -12,7 +12,7 @@ import ServicesInitAnimations from "@/components/pages/services/ServicesInitAnim import { Service } from "@/lib/api/serviceService"; import { generateServiceMetadata } from "@/lib/seo/metadata"; import { ServiceSchema, BreadcrumbSchema } from "@/components/shared/seo/StructuredData"; -import { API_CONFIG } from "@/lib/config/api"; +import { API_CONFIG, getApiHeaders } from "@/lib/config/api"; interface ServicePageProps { params: Promise<{ @@ -24,13 +24,13 @@ interface ServicePageProps { // This pre-generates known pages, but new pages can still be generated on-demand export async function generateStaticParams() { try { + // Use internal API URL for server-side requests + const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${API_CONFIG.BASE_URL}/api/services/`, + `${apiUrl}/api/services/`, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), next: { revalidate: 
60 }, // Revalidate every minute for faster image updates } ); @@ -57,13 +57,13 @@ export async function generateMetadata({ params }: ServicePageProps) { const { slug } = await params; try { + // Use internal API URL for server-side requests + const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${API_CONFIG.BASE_URL}/api/services/${slug}/`, + `${apiUrl}/api/services/${slug}/`, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), next: { revalidate: 60 }, // Revalidate every minute for faster image updates } ); @@ -87,13 +87,13 @@ const ServicePage = async ({ params }: ServicePageProps) => { const { slug } = await params; try { + // Use internal API URL for server-side requests + const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${API_CONFIG.BASE_URL}/api/services/${slug}/`, + `${apiUrl}/api/services/${slug}/`, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers: getApiHeaders(), next: { revalidate: 60 }, // Revalidate every minute for faster image updates } ); diff --git a/nginx-gnxsoft.conf b/nginx-gnxsoft.conf index f4e62095..f2689436 100644 --- a/nginx-gnxsoft.conf +++ b/nginx-gnxsoft.conf @@ -200,12 +200,22 @@ server { } } - # Frontend public images - location /images/ { - alias /var/www/GNX-WEB/frontEnd/public/images/; + # Frontend public images - must come before root location + location ~ ^/images/(.*)$ { + alias /var/www/GNX-WEB/frontEnd/public/images/$1; expires 30d; add_header Cache-Control "public, immutable"; access_log off; + + # Ensure proper MIME types + types { + image/png png; + image/jpeg jpg jpeg; + image/gif gif; + image/svg+xml svg; + image/webp webp; + } + default_type application/octet-stream; } # Root location - Frontend (Next.js) - MUST be last diff --git a/start-services.sh 
b/start-services.sh index 6554cb1c..e140b5b7 100755 --- a/start-services.sh +++ b/start-services.sh @@ -221,6 +221,19 @@ if grep -q '"output":\s*"standalone"' next.config.js 2>/dev/null || grep -q "out NODE_ENV=production PORT=$FRONTEND_PORT npm run build fi + # Ensure public folder is copied to standalone directory + if [ ! -d ".next/standalone/public" ]; then + echo -e "${BLUE}Copying public folder to standalone directory...${NC}" + cp -r public .next/standalone/ + fi + + # Ensure static folder symlink exists for image optimization + if [ ! -L ".next/standalone/.next/static" ] && [ ! -d ".next/standalone/.next/static" ]; then + echo -e "${BLUE}Creating symlink for static files...${NC}" + mkdir -p .next/standalone/.next + ln -s ../../static .next/standalone/.next/static + fi + # Start standalone server with PM2 PORT=$FRONTEND_PORT NODE_ENV=production pm2 start node \ --name "gnxsoft-frontend" \ From e639736187f61a7d118c64849d1149012c42caf1 Mon Sep 17 00:00:00 2001 From: Iliyan Angelov Date: Tue, 25 Nov 2025 20:18:23 +0200 Subject: [PATCH 4/7] updates --- frontEnd/app/career/[slug]/page.tsx | 44 ++++++++++-- frontEnd/app/services/[slug]/page.tsx | 26 +++---- frontEnd/components/pages/career/Thrive.tsx | 12 ++-- .../components/pages/case-study/CaseItems.tsx | 3 +- .../pages/case-study/CaseSingle.tsx | 2 - .../pages/case-study/RelatedCase.tsx | 3 +- .../pages/contact/ContactSection.tsx | 1 - frontEnd/components/pages/home/Overview.tsx | 15 +++-- .../components/pages/home/ServiceIntro.tsx | 3 +- .../components/pages/services/Transform.tsx | 4 +- .../shared/layout/header/OffcanvasMenu.tsx | 3 +- frontEnd/lib/config/api.ts | 67 ++++++++++++++++++- frontEnd/lib/imageUtils.ts | 50 ++++++++++++-- frontEnd/next.config.js | 6 +- frontEnd/public/styles/layout/_banner.scss | 3 +- nginx-gnxsoft.conf | 9 ++- 16 files changed, 190 insertions(+), 61 deletions(-) diff --git a/frontEnd/app/career/[slug]/page.tsx b/frontEnd/app/career/[slug]/page.tsx index f46f6e05..9ffd042b 100644 
--- a/frontEnd/app/career/[slug]/page.tsx +++ b/frontEnd/app/career/[slug]/page.tsx @@ -15,17 +15,51 @@ interface JobPageProps { }>; } +// Generate static params for all job positions at build time (optional - for better performance) +// This pre-generates known pages, but new pages can still be generated on-demand +export async function generateStaticParams() { + try { + // Use internal API URL for server-side requests + const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; + const response = await fetch( + `${apiUrl}/api/career/jobs`, + { + method: 'GET', + headers: getApiHeaders(), + next: { revalidate: 60 }, // Revalidate every minute + } + ); + + if (!response.ok) { + console.error('Error fetching jobs for static params:', response.status); + return []; + } + + const data = await response.json(); + const jobs = data.results || data; + + return jobs.map((job: JobPosition) => ({ + slug: job.slug, + })); + } catch (error) { + console.error('Error generating static params for jobs:', error); + return []; + } +} + // Generate metadata for each job page export async function generateMetadata({ params }: JobPageProps): Promise { const { slug } = await params; try { + // Use internal API URL for server-side requests + const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${API_CONFIG.BASE_URL}/api/career/jobs/${slug}/`, + `${apiUrl}/api/career/jobs/${slug}`, { method: 'GET', headers: getApiHeaders(), - next: { revalidate: 3600 }, // Revalidate every hour + next: { revalidate: 60 }, // Revalidate every minute } ); @@ -55,12 +89,14 @@ const JobPage = async ({ params }: JobPageProps) => { const { slug } = await params; try { + // Use internal API URL for server-side requests + const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - 
`${API_CONFIG.BASE_URL}/api/career/jobs/${slug}/`, + `${apiUrl}/api/career/jobs/${slug}`, { method: 'GET', headers: getApiHeaders(), - next: { revalidate: 3600 }, // Revalidate every hour + next: { revalidate: 60 }, // Revalidate every minute } ); diff --git a/frontEnd/app/services/[slug]/page.tsx b/frontEnd/app/services/[slug]/page.tsx index 5a4ca70c..5b5802db 100644 --- a/frontEnd/app/services/[slug]/page.tsx +++ b/frontEnd/app/services/[slug]/page.tsx @@ -12,7 +12,7 @@ import ServicesInitAnimations from "@/components/pages/services/ServicesInitAnim import { Service } from "@/lib/api/serviceService"; import { generateServiceMetadata } from "@/lib/seo/metadata"; import { ServiceSchema, BreadcrumbSchema } from "@/components/shared/seo/StructuredData"; -import { API_CONFIG, getApiHeaders } from "@/lib/config/api"; +import { API_CONFIG } from "@/lib/config/api"; interface ServicePageProps { params: Promise<{ @@ -24,13 +24,13 @@ interface ServicePageProps { // This pre-generates known pages, but new pages can still be generated on-demand export async function generateStaticParams() { try { - // Use internal API URL for server-side requests - const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${apiUrl}/api/services/`, + `${API_CONFIG.BASE_URL}/api/services/`, { method: 'GET', - headers: getApiHeaders(), + headers: { + 'Content-Type': 'application/json', + }, next: { revalidate: 60 }, // Revalidate every minute for faster image updates } ); @@ -57,13 +57,13 @@ export async function generateMetadata({ params }: ServicePageProps) { const { slug } = await params; try { - // Use internal API URL for server-side requests - const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${apiUrl}/api/services/${slug}/`, + `${API_CONFIG.BASE_URL}/api/services/${slug}/`, { method: 'GET', - headers: getApiHeaders(), 
+ headers: { + 'Content-Type': 'application/json', + }, next: { revalidate: 60 }, // Revalidate every minute for faster image updates } ); @@ -87,13 +87,13 @@ const ServicePage = async ({ params }: ServicePageProps) => { const { slug } = await params; try { - // Use internal API URL for server-side requests - const apiUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086'; const response = await fetch( - `${apiUrl}/api/services/${slug}/`, + `${API_CONFIG.BASE_URL}/api/services/${slug}/`, { method: 'GET', - headers: getApiHeaders(), + headers: { + 'Content-Type': 'application/json', + }, next: { revalidate: 60 }, // Revalidate every minute for faster image updates } ); diff --git a/frontEnd/components/pages/career/Thrive.tsx b/frontEnd/components/pages/career/Thrive.tsx index e2770e81..fce67f7d 100644 --- a/frontEnd/components/pages/career/Thrive.tsx +++ b/frontEnd/components/pages/career/Thrive.tsx @@ -1,8 +1,4 @@ import Image from "next/legacy/image"; -import time from "@/public/images/time.png"; -import trans from "@/public/images/trans.png"; -import support from "@/public/images/support.png"; -import skill from "@/public/images/skill.png"; const Thrive = () => { return ( @@ -20,7 +16,7 @@ const Thrive = () => {
- Image + Image

@@ -35,7 +31,7 @@ const Thrive = () => {

- Image + Image

@@ -50,7 +46,7 @@ const Thrive = () => {

- Image + Image

Support

@@ -63,7 +59,7 @@ const Thrive = () => {
- Image + Image

diff --git a/frontEnd/components/pages/case-study/CaseItems.tsx b/frontEnd/components/pages/case-study/CaseItems.tsx index 544da055..60ac8964 100644 --- a/frontEnd/components/pages/case-study/CaseItems.tsx +++ b/frontEnd/components/pages/case-study/CaseItems.tsx @@ -3,7 +3,6 @@ import Image from "next/legacy/image"; import Link from "next/link"; import { useCaseStudies } from "@/lib/hooks/useCaseStudy"; import { getImageUrl } from "@/lib/imageUtils"; -import one from "@/public/images/case/one.png"; const CaseItems = () => { const { caseStudies, loading: casesLoading } = useCaseStudies(); @@ -56,7 +55,7 @@ const CaseItems = () => {
{caseStudy.title} {
{relatedCase.title} { diff --git a/frontEnd/components/pages/home/Overview.tsx b/frontEnd/components/pages/home/Overview.tsx index ec36c9d7..1ab33629 100644 --- a/frontEnd/components/pages/home/Overview.tsx +++ b/frontEnd/components/pages/home/Overview.tsx @@ -5,14 +5,15 @@ import Link from "next/link"; import { useMemo } from "react"; import { useServices } from "@/lib/hooks/useServices"; import { serviceUtils } from "@/lib/api/serviceService"; -import one from "@/public/images/overview/one.png"; -import two from "@/public/images/overview/two.png"; -import three from "@/public/images/overview/three.png"; -import four from "@/public/images/overview/four.png"; -import five from "@/public/images/overview/five.png"; -// Default images array for fallback -const defaultImages = [one, two, three, four, five]; +// Default images array for fallback - use string paths +const defaultImages = [ + "/images/overview/one.png", + "/images/overview/two.png", + "/images/overview/three.png", + "/images/overview/four.png", + "/images/overview/five.png" +]; const Overview = () => { // Memoize the parameters to prevent infinite re-renders diff --git a/frontEnd/components/pages/home/ServiceIntro.tsx b/frontEnd/components/pages/home/ServiceIntro.tsx index a1f81fc9..63abe7ed 100644 --- a/frontEnd/components/pages/home/ServiceIntro.tsx +++ b/frontEnd/components/pages/home/ServiceIntro.tsx @@ -1,6 +1,5 @@ import Image from "next/legacy/image"; import Link from "next/link"; -import thumb from "@/public/images/leading.jpg"; const ServiceIntro = () => { return ( @@ -11,7 +10,7 @@ const ServiceIntro = () => {
Enterprise Software Solutions {
{service.title}
- Image + Image

{
  • @@ -123,7 +123,7 @@ const CareerBanner = () => {
  • diff --git a/frontEnd/components/pages/services/ServicesBanner.tsx b/frontEnd/components/pages/services/ServicesBanner.tsx index 73accc49..c6356a9e 100644 --- a/frontEnd/components/pages/services/ServicesBanner.tsx +++ b/frontEnd/components/pages/services/ServicesBanner.tsx @@ -84,7 +84,7 @@ const ServicesBanner = () => {
    • @@ -93,7 +93,7 @@ const ServicesBanner = () => {
    • diff --git a/frontEnd/components/shared/layout/footer/Footer.tsx b/frontEnd/components/shared/layout/footer/Footer.tsx index 3db7dfea..ed552c16 100644 --- a/frontEnd/components/shared/layout/footer/Footer.tsx +++ b/frontEnd/components/shared/layout/footer/Footer.tsx @@ -289,7 +289,7 @@ const Footer = () => {
      {
    • @@ -184,7 +184,7 @@ const OffcanvasMenu = ({
    • diff --git a/frontEnd/lib/seo/metadata.ts b/frontEnd/lib/seo/metadata.ts index d49b0155..8b2e162a 100644 --- a/frontEnd/lib/seo/metadata.ts +++ b/frontEnd/lib/seo/metadata.ts @@ -17,8 +17,8 @@ export const SITE_CONFIG = { country: 'Bulgaria', }, social: { - linkedin: 'https://www.linkedin.com/company/gnxtech', - github: 'https://github.com/gnxtech', + linkedin: 'https://linkedin.com', + github: 'https://github.com', }, businessHours: 'Monday - Friday: 9:00 AM - 6:00 PM PST', foundedYear: 2020, From 4c15d90a6a68308f42a917d8d90ac0593122d9f2 Mon Sep 17 00:00:00 2001 From: Iliyan Angelov Date: Sat, 29 Nov 2025 14:49:53 +0200 Subject: [PATCH 6/7] updates --- backEnd/contact/views.py | 9 +++++++ backEnd/gnx/middleware/csrf_exempt.py | 37 +++++++++++++++++++++++++++ backEnd/gnx/settings.py | 1 + 3 files changed, 47 insertions(+) create mode 100644 backEnd/gnx/middleware/csrf_exempt.py diff --git a/backEnd/contact/views.py b/backEnd/contact/views.py index 976ce0e6..ee9b7182 100644 --- a/backEnd/contact/views.py +++ b/backEnd/contact/views.py @@ -62,6 +62,15 @@ class ContactSubmissionViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated] return [permission() for permission in permission_classes] + def get_authenticators(self): + """ + Override authentication for create action to bypass CSRF. + By returning an empty list, DRF won't enforce CSRF for this action. + """ + if hasattr(self, 'action') and self.action == 'create': + return [] + return super().get_authenticators() + def create(self, request, *args, **kwargs): """ Create a new contact submission. diff --git a/backEnd/gnx/middleware/csrf_exempt.py b/backEnd/gnx/middleware/csrf_exempt.py new file mode 100644 index 00000000..a05b0d96 --- /dev/null +++ b/backEnd/gnx/middleware/csrf_exempt.py @@ -0,0 +1,37 @@ +""" +CSRF Exemption Middleware +Exempts CSRF checks for specific public API endpoints that don't require authentication. 
+""" + +from django.utils.deprecation import MiddlewareMixin +import re + + +class CSRFExemptMiddleware(MiddlewareMixin): + """ + Middleware to exempt CSRF for public API endpoints. + Runs before CSRF middleware to set the exemption flag. + """ + + # Paths that should be exempt from CSRF (public endpoints) + # Patterns match both with and without trailing slashes + EXEMPT_PATHS = [ + r'^/api/contact/submissions/?$', # Contact form submission + r'^/api/career/applications/?$', # Job application submission (if needed) + r'^/api/support/tickets/?$', # Support ticket creation (if needed) + ] + + def process_request(self, request): + """ + Set CSRF exemption flag for matching paths. + """ + if request.method == 'POST': + path = request.path + for pattern in self.EXEMPT_PATHS: + if re.match(pattern, path): + # Set flag to bypass CSRF check + setattr(request, '_dont_enforce_csrf_checks', True) + break + + return None + diff --git a/backEnd/gnx/settings.py b/backEnd/gnx/settings.py index 00db7acd..3327973c 100644 --- a/backEnd/gnx/settings.py +++ b/backEnd/gnx/settings.py @@ -68,6 +68,7 @@ MIDDLEWARE = [ 'gnx.middleware.api_security.FrontendAPIProxyMiddleware', # Validate requests from frontend/nginx 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', + 'gnx.middleware.csrf_exempt.CSRFExemptMiddleware', # Exempt CSRF for public API endpoints 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', From d7d7a2757a183aa1abd9dbabff804c45298df4e5 Mon Sep 17 00:00:00 2001 From: Iliyan Angelov Date: Sat, 29 Nov 2025 18:05:34 +0200 Subject: [PATCH 7/7] updates --- frontEnd/components/pages/home/Story.tsx | 58 +++++++++++++----------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/frontEnd/components/pages/home/Story.tsx b/frontEnd/components/pages/home/Story.tsx index 260396e0..68612b0b 100644 --- 
a/frontEnd/components/pages/home/Story.tsx +++ b/frontEnd/components/pages/home/Story.tsx @@ -273,7 +273,9 @@ const Story = () => {

      - {item.title} + + {item.title} +

      {item.excerpt || item.description?.substring(0, 150) + '...'}

@@ -300,32 +302,34 @@ const Story = () => { className={`tp-story-thumb ${isActive ? "thumb-active" : ""}`} data-loaded={isLoaded} > - {item.title { - if (!isLoaded) { - setImagesLoaded((prev) => { - const newSet = new Set(prev); - newSet.add(index); - return newSet; - }); - } - }} - /> + + {item.title { + if (!isLoaded) { + setImagesLoaded((prev) => { + const newSet = new Set(prev); + newSet.add(index); + return newSet; + }); + } + }} + /> +
); })}