This commit is contained in:
Iliyan Angelov
2025-11-25 02:06:38 +02:00
parent 2f6dca736a
commit 82024016cd
37 changed files with 1800 additions and 1478 deletions

97
.gitignore vendored Normal file
View File

@@ -0,0 +1,97 @@
# Environment files
.env
.env.local
.env.production
.env.*.local
backEnd/.env
frontEnd/.env.production
frontEnd/.env.local
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
venv/
env/
ENV/
.venv
# Django
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
backEnd/media/
backEnd/staticfiles/
backEnd/static/
backEnd/logs/
# Node.js
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
frontEnd/.next/
frontEnd/out/
frontEnd/build/
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Logs
*.log
logs/
# Coverage
htmlcov/
.coverage
.coverage.*
coverage.xml
*.cover
# Testing
.pytest_cache/
.tox/
# PM2
.pm2/
# SSL Certificates
*.pem
*.key
*.crt
# Backup files
*.sql
*.backup
*.bak
# Temporary files
*.tmp
*.temp

View File

@@ -1,39 +0,0 @@
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.so
*.egg
*.egg-info
dist
build
.venv
venv/
env/
ENV/
.env
.venv
*.log
logs/
*.db
*.sqlite3
db.sqlite3
.git
.gitignore
README.md
*.md
.DS_Store
.vscode
.idea
*.swp
*.swo
*~
.pytest_cache
.coverage
htmlcov/
.tox/
.mypy_cache/
.dmypy.json
dmypy.json

68
backEnd/.gitignore vendored Normal file
View File

@@ -0,0 +1,68 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Virtual Environment
venv/
env/
ENV/
.venv
# Django
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
/media
/staticfiles
/static
# Environment variables
.env
.env.local
.env.*.local
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Logs
logs/
*.log
# Coverage
htmlcov/
.coverage
.coverage.*
coverage.xml
*.cover
# Testing
.pytest_cache/
.tox/

View File

@@ -1,36 +0,0 @@
# Django Backend Dockerfile
FROM python:3.12-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
DEBIAN_FRONTEND=noninteractive
# Set work directory
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
gcc \
postgresql-client \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies
COPY requirements.txt /app/
RUN pip install --no-cache-dir -r requirements.txt
# Copy project
COPY . /app/
# Create directories for media and static files
RUN mkdir -p /app/media /app/staticfiles /app/logs
# Collect static files (will be done at runtime if needed)
# RUN python manage.py collectstatic --noinput
# Expose port
EXPOSE 1086
# Run gunicorn
CMD ["gunicorn", "--bind", "0.0.0.0:1086", "--workers", "3", "--timeout", "120", "--access-logfile", "-", "--error-logfile", "-", "gnx.wsgi:application"]

View File

@@ -0,0 +1,101 @@
"""
Custom email backend that handles localhost SSL certificate issues.
Disables SSL certificate verification for localhost connections.
"""
import ssl
from django.core.mail.backends.smtp import EmailBackend
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
class LocalhostSMTPBackend(EmailBackend):
    """
    SMTP backend that skips SSL certificate verification when the mail
    server is localhost (self-signed certificates are common there).

    Any non-localhost host, or any unencrypted connection, falls through
    to Django's standard ``EmailBackend`` with full verification intact.
    """

    # Hosts treated as "local" and therefore exempt from cert verification.
    _LOCAL_HOSTS = ('localhost', '127.0.0.1', '::1')

    @staticmethod
    def _insecure_context():
        """Return an SSL context with hostname and certificate checks disabled."""
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        return context

    def open(self):
        """
        Open a connection to the SMTP server.

        Returns ``True`` when a new localhost connection was established,
        otherwise whatever the parent implementation returns. Re-raises the
        underlying exception on connection failure after best-effort cleanup.

        The original implementation carried ``sys.version_info`` fallbacks
        for Python < 3.4 (including a temporary monkey-patch of
        ``ssl._create_default_https_context``); those branches were dead code
        on this project's Python 3.12 runtime and have been removed —
        ``SMTP_SSL(context=...)`` and ``starttls(context=...)`` are always
        available here.
        """
        # Only special-case encrypted connections to localhost.
        if not (self.use_ssl or self.use_tls) or self.host not in self._LOCAL_HOSTS:
            return super().open()

        self.connection = None
        try:
            import smtplib

            context = self._insecure_context()
            if self.use_ssl:
                # Implicit TLS: the socket is wrapped from the start.
                self.connection = smtplib.SMTP_SSL(
                    self.host,
                    self.port,
                    timeout=self.timeout,
                    context=context,
                )
            else:
                # STARTTLS: plain connection upgraded to TLS in-band.
                self.connection = smtplib.SMTP(
                    self.host,
                    self.port,
                    timeout=self.timeout,
                )
                self.connection.starttls(context=context)

            if self.username and self.password:
                self.connection.login(self.username, self.password)
            # Lazy %-style args so formatting is skipped when INFO is disabled.
            logger.info(
                "Successfully connected to localhost mail server at %s:%s",
                self.host,
                self.port,
            )
            return True
        except Exception as exc:
            logger.error("Failed to connect to localhost mail server: %s", exc)
            if self.connection:
                try:
                    self.connection.quit()
                except Exception:
                    # Best-effort teardown; the original error is re-raised below.
                    pass
            self.connection = None
            raise

View File

@@ -98,22 +98,34 @@ WSGI_APPLICATION = 'gnx.wsgi.application'
# Database # Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases # https://docs.djangoproject.com/en/4.2/ref/settings/#databases
# Support both PostgreSQL (production) and SQLite (development) # Force SQLite - change this to False and set USE_POSTGRESQL=True to use PostgreSQL
DATABASE_URL = config('DATABASE_URL', default='') FORCE_SQLITE = True # Set to False to allow PostgreSQL
if DATABASE_URL and DATABASE_URL.startswith('postgresql://'):
# PostgreSQL configuration if not FORCE_SQLITE:
import dj_database_url # PostgreSQL configuration (only if FORCE_SQLITE is False)
DATABASES = { USE_POSTGRESQL = config('USE_POSTGRESQL', default='False', cast=bool)
'default': dj_database_url.parse(DATABASE_URL, conn_max_age=600) DATABASE_URL = config('DATABASE_URL', default='')
} if USE_POSTGRESQL and DATABASE_URL and DATABASE_URL.startswith('postgresql://'):
import dj_database_url
DATABASES = {
'default': dj_database_url.parse(DATABASE_URL, conn_max_age=600)
}
else:
# Fallback to SQLite
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
else: else:
# SQLite configuration (development/fallback) # SQLite configuration (forced)
DATABASES = { DATABASES = {
'default': { 'default': {
'ENGINE': 'django.db.backends.sqlite3', 'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3',
}
} }
}
# Password validation # Password validation
@@ -355,8 +367,12 @@ if DEBUG and not USE_SMTP_IN_DEV:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else: else:
# Production or Dev with SMTP enabled - use SMTP backend # Production or Dev with SMTP enabled - use SMTP backend
EMAIL_BACKEND = config('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = config('EMAIL_HOST', default='mail.gnxsoft.com') EMAIL_HOST = config('EMAIL_HOST', default='mail.gnxsoft.com')
# Use custom backend for localhost to handle SSL certificate issues
if EMAIL_HOST in ['localhost', '127.0.0.1', '::1']:
EMAIL_BACKEND = 'gnx.email_backend.LocalhostSMTPBackend'
else:
EMAIL_BACKEND = config('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
EMAIL_PORT = config('EMAIL_PORT', default=587, cast=int) EMAIL_PORT = config('EMAIL_PORT', default=587, cast=int)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=True, cast=bool) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=True, cast=bool)
EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool) EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
@@ -367,7 +383,8 @@ else:
EMAIL_TIMEOUT = config('EMAIL_TIMEOUT', default=30, cast=int) EMAIL_TIMEOUT = config('EMAIL_TIMEOUT', default=30, cast=int)
# Site URL for email links # Site URL for email links
SITE_URL = config('SITE_URL', default='http://localhost:3000') # Use production URL by default if not in DEBUG mode
SITE_URL = config('SITE_URL', default='https://gnxsoft.com' if not DEBUG else 'http://localhost:3000')
# Email connection settings for production reliability # Email connection settings for production reliability
EMAIL_CONNECTION_TIMEOUT = config('EMAIL_CONNECTION_TIMEOUT', default=10, cast=int) EMAIL_CONNECTION_TIMEOUT = config('EMAIL_CONNECTION_TIMEOUT', default=10, cast=int)

View File

@@ -1,26 +1,33 @@
# Production Environment Configuration for GNX Contact Form # Production Environment Configuration for GNX-WEB
# Copy this file to .env and update with your actual values # Copy this file to .env in the backEnd directory and update with your actual values
# Backend runs on port 1086 (internal only, proxied through nginx)
# Django Settings # Django Settings
SECRET_KEY=your-super-secret-production-key-here SECRET_KEY=your-super-secret-production-key-here-change-this-immediately
DEBUG=False DEBUG=False
ALLOWED_HOSTS=gnxsoft.com,www.gnxsoft.com,your-server-ip ALLOWED_HOSTS=gnxsoft.com,www.gnxsoft.com,your-server-ip,localhost,127.0.0.1
# Database - Using SQLite (default) # Database - PostgreSQL on host (port 5433 to avoid conflict with Docker instance on 5432)
# SQLite is configured in settings.py - no DATABASE_URL needed # Format: postgresql://USER:PASSWORD@HOST:PORT/DBNAME
# Create database: sudo -u postgres psql
# CREATE DATABASE gnx_db;
# CREATE USER gnx_user WITH PASSWORD 'your_secure_password';
# GRANT ALL PRIVILEGES ON DATABASE gnx_db TO gnx_user;
DATABASE_URL=postgresql://gnx_user:your_password_here@localhost:5433/gnx_db
# Email Configuration (Production) # Email Configuration (Production)
EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend
EMAIL_HOST=smtp.gmail.com EMAIL_HOST=mail.gnxsoft.com
EMAIL_PORT=587 EMAIL_PORT=587
EMAIL_USE_TLS=True EMAIL_USE_TLS=True
EMAIL_USE_SSL=False EMAIL_USE_SSL=False
EMAIL_HOST_USER=your-email@gmail.com EMAIL_HOST_USER=your-email@gnxsoft.com
EMAIL_HOST_PASSWORD=your-app-password EMAIL_HOST_PASSWORD=your-email-password
DEFAULT_FROM_EMAIL=noreply@gnxsoft.com DEFAULT_FROM_EMAIL=noreply@gnxsoft.com
# Company email for contact form notifications # Company email for contact form notifications
COMPANY_EMAIL=contact@gnxsoft.com COMPANY_EMAIL=contact@gnxsoft.com
SUPPORT_EMAIL=support@gnxsoft.com
# Email timeout settings for production reliability # Email timeout settings for production reliability
EMAIL_TIMEOUT=30 EMAIL_TIMEOUT=30
@@ -35,6 +42,8 @@ SECURE_HSTS_PRELOAD=True
SECURE_CONTENT_TYPE_NOSNIFF=True SECURE_CONTENT_TYPE_NOSNIFF=True
SECURE_BROWSER_XSS_FILTER=True SECURE_BROWSER_XSS_FILTER=True
X_FRAME_OPTIONS=DENY X_FRAME_OPTIONS=DENY
SESSION_COOKIE_SECURE=True
CSRF_COOKIE_SECURE=True
# CORS Settings (Production) # CORS Settings (Production)
PRODUCTION_ORIGINS=https://gnxsoft.com,https://www.gnxsoft.com PRODUCTION_ORIGINS=https://gnxsoft.com,https://www.gnxsoft.com
@@ -47,15 +56,27 @@ CSRF_TRUSTED_ORIGINS=https://gnxsoft.com,https://www.gnxsoft.com
# REQUIRED in production! Auto-generated only in DEBUG mode. # REQUIRED in production! Auto-generated only in DEBUG mode.
# Generate a secure key: python -c "import secrets; print(secrets.token_urlsafe(32))" # Generate a secure key: python -c "import secrets; print(secrets.token_urlsafe(32))"
# Or get current key: python manage.py show_api_key # Or get current key: python manage.py show_api_key
# This key must match the one in nginx configuration
INTERNAL_API_KEY=your-secure-api-key-here-change-this-in-production INTERNAL_API_KEY=your-secure-api-key-here-change-this-in-production
# Admin IP Restriction - Only these IPs can access Django admin # Admin IP Restriction - Only these IPs can access Django admin
# Comma-separated list of IP addresses or CIDR networks (e.g., 193.194.155.249 or 192.168.1.0/24) # Comma-separated list of IP addresses or CIDR networks (e.g., 193.194.155.249 or 192.168.1.0/24)
ADMIN_ALLOWED_IPS=193.194.155.249 ADMIN_ALLOWED_IPS=193.194.155.249
# Static Files # Custom allowed IPs for IP whitelist middleware (optional, comma-separated)
STATIC_ROOT=/var/www/gnx/staticfiles/ CUSTOM_ALLOWED_IPS=
MEDIA_ROOT=/var/www/gnx/media/
# Site URL for email links and absolute URLs
SITE_URL=https://gnxsoft.com
# Static and Media Files (relative to backEnd directory)
# These will be collected/served from these locations
STATIC_ROOT=/home/gnx/Desktop/GNX-WEB/backEnd/staticfiles
MEDIA_ROOT=/home/gnx/Desktop/GNX-WEB/backEnd/media
# Logging # Logging
LOG_LEVEL=INFO LOG_LEVEL=INFO
# Backend Port (internal only, nginx proxies to this)
# Backend runs on 127.0.0.1:1086
BACKEND_PORT=1086

View File

@@ -1,249 +0,0 @@
#!/bin/bash
# Clean script for GNX Web Application - Prepares project for deployment
# This script removes all cache files, build artifacts, and temporary files
set -e
echo "🧹 Cleaning GNX Web Application for deployment..."
echo ""
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Function to safely remove directories
remove_dir() {
if [ -d "$1" ]; then
echo -e "${YELLOW}Removing: $1${NC}"
rm -rf "$1"
echo -e "${GREEN}✅ Removed: $1${NC}"
fi
}
# Function to safely remove files
remove_file() {
if [ -f "$1" ]; then
echo -e "${YELLOW}Removing: $1${NC}"
rm -f "$1"
echo -e "${GREEN}✅ Removed: $1${NC}"
fi
}
# Function to find and remove files by pattern
remove_pattern() {
find . -name "$1" -type f -not -path "./.git/*" -not -path "./node_modules/*" 2>/dev/null | while read -r file; do
echo -e "${YELLOW}Removing: $file${NC}"
rm -f "$file"
done
echo -e "${GREEN}✅ Cleaned: $1${NC}"
}
# Function to find and remove directories by pattern
remove_dir_pattern() {
find . -name "$1" -type d -not -path "./.git/*" -not -path "./node_modules/*" 2>/dev/null | while read -r dir; do
echo -e "${YELLOW}Removing: $dir${NC}"
rm -rf "$dir"
done
echo -e "${GREEN}✅ Cleaned: $1${NC}"
}
echo "📦 Step 1: Stopping Docker containers (if running)..."
docker-compose down 2>/dev/null || true
echo ""
echo "📦 Step 2: Removing Docker volumes (optional - uncomment if needed)..."
# Uncomment the next line if you want to remove Docker volumes (WARNING: This deletes database data!)
# docker-compose down -v 2>/dev/null || true
echo ""
echo "📦 Step 3: Removing Docker build cache..."
docker system prune -f --volumes 2>/dev/null || true
echo ""
echo "🐍 Step 4: Cleaning Python artifacts..."
# Remove Python cache directories
remove_dir_pattern "__pycache__"
# Remove Python compiled files
remove_pattern "*.pyc"
remove_pattern "*.pyo"
remove_pattern "*.pyd"
# Remove Python egg files
remove_pattern "*.egg"
remove_dir_pattern "*.egg-info"
# Remove Python virtual environments
remove_dir "backEnd/venv"
remove_dir "frontEnd/venv"
remove_dir ".venv"
remove_dir "venv"
remove_dir "env"
remove_dir "ENV"
# Remove Python build directories
remove_dir "backEnd/build"
remove_dir "backEnd/dist"
remove_dir "frontEnd/build"
remove_dir "frontEnd/dist"
# Remove Python test artifacts
remove_dir ".pytest_cache"
remove_dir ".coverage"
remove_dir "htmlcov"
remove_dir ".tox"
remove_dir ".mypy_cache"
remove_file ".dmypy.json"
remove_file "dmypy.json"
echo ""
echo "📦 Step 5: Cleaning Node.js artifacts..."
# Remove node_modules
remove_dir "frontEnd/node_modules"
# Remove Next.js build artifacts
remove_dir "frontEnd/.next"
remove_dir "frontEnd/out"
remove_dir "frontEnd/build"
remove_dir "frontEnd/.pnp"
remove_file "frontEnd/.pnp.js"
# Remove TypeScript build info
remove_pattern "*.tsbuildinfo"
remove_file "frontEnd/next-env.d.ts"
# Remove package manager files
remove_file "frontEnd/.yarn/install-state.gz"
echo ""
echo "📝 Step 6: Cleaning log files..."
# Remove log files
remove_pattern "*.log"
remove_dir "backEnd/logs"
remove_file "frontEnd/dev.log"
remove_file "frontEnd/npm-debug.log*"
remove_file "frontEnd/yarn-debug.log*"
remove_file "frontEnd/yarn-error.log*"
echo ""
echo "🗄️ Step 7: Cleaning database files..."
# Remove SQLite databases (keep if you need them, but typically not for deployment)
# Uncomment if you want to remove SQLite files
# remove_file "backEnd/db.sqlite3"
# remove_pattern "*.db"
# remove_pattern "*.sqlite"
# remove_pattern "*.sqlite3"
# Remove migration marker files
remove_file ".migrated_to_postgres"
echo ""
echo "📁 Step 8: Cleaning static files (will be regenerated on build)..."
# Remove collected static files (they'll be regenerated)
remove_dir "backEnd/staticfiles"
echo ""
echo "💾 Step 9: Cleaning backup files..."
# Remove backup files
remove_pattern "*.backup"
remove_pattern "*.bak"
remove_pattern "*~"
remove_pattern "*.swp"
remove_pattern "*.swo"
remove_dir "backups"
echo ""
echo "🖥️ Step 10: Cleaning IDE and OS files..."
# Remove IDE directories
remove_dir ".vscode"
remove_dir ".idea"
remove_dir "backEnd/.vscode"
remove_dir "backEnd/.idea"
remove_dir "frontEnd/.vscode"
remove_dir "frontEnd/.idea"
# Remove OS files
remove_pattern ".DS_Store"
remove_pattern "Thumbs.db"
remove_pattern ".DS_Store?"
echo ""
echo "🔐 Step 11: Cleaning environment files (keeping examples)..."
# Remove local env files (keep examples)
remove_file ".env.local"
remove_file ".env.development.local"
remove_file ".env.test.local"
remove_file ".env.production.local"
remove_file "frontEnd/.env.local"
remove_file "frontEnd/.env.development.local"
remove_file "frontEnd/.env.test.local"
remove_file "frontEnd/.env.production.local"
# Note: We keep .env.production as it's needed for deployment
echo -e "${YELLOW}⚠️ Note: .env.production is kept (needed for deployment)${NC}"
echo ""
echo "📦 Step 12: Cleaning other artifacts..."
# Remove coverage directories
remove_dir "coverage"
remove_dir ".nyc_output"
remove_dir "frontEnd/coverage"
# Remove vercel directory
remove_dir "frontEnd/.vercel"
# Remove certificate files (if any)
remove_pattern "*.pem"
echo ""
echo "🧹 Step 13: Final cleanup..."
# Remove any remaining temporary files
find . -name "*.tmp" -type f -not -path "./.git/*" 2>/dev/null | while read -r file; do
remove_file "$file"
done
# Remove empty directories (optional - be careful with this)
# find . -type d -empty -not -path "./.git/*" -not -path "./node_modules/*" -delete 2>/dev/null || true
echo ""
echo "✅ Cleanup complete!"
echo ""
echo "📋 Summary:"
echo " - Python cache files removed"
echo " - Virtual environments removed"
echo " - Node.js artifacts removed"
echo " - Build artifacts removed"
echo " - Log files removed"
echo " - IDE/OS files removed"
echo ""
echo "⚠️ Important notes:"
echo " - .env.production is kept (needed for deployment)"
echo " - Media files are kept (user uploads)"
echo " - Docker volumes were NOT removed (database data preserved)"
echo " - If you need a complete clean, uncomment Docker volume removal in the script"
echo ""
echo "🚀 Project is now ready for deployment!"
echo " Run: ./docker-start.sh to start the stack"

View File

@@ -1,56 +0,0 @@
#!/bin/bash
# Script to create a production deployment zip file
set -e
ZIP_NAME="gnx-web-production-$(date +%Y%m%d).zip"
TEMP_DIR=$(mktemp -d)
echo "📦 Creating deployment package: $ZIP_NAME"
echo ""
# Copy files to temp directory
echo "📋 Copying files..."
rsync -av --progress \
--exclude='.git' \
--exclude='node_modules' \
--exclude='__pycache__' \
--exclude='*.pyc' \
--exclude='venv' \
--exclude='env' \
--exclude='.venv' \
--exclude='*.log' \
--exclude='*.sqlite3' \
--exclude='backups' \
--exclude='*.swp' \
--exclude='*.swo' \
--exclude='.DS_Store' \
--exclude='.vscode' \
--exclude='.idea' \
--exclude='.next' \
--exclude='dist' \
--exclude='build' \
--exclude='*.egg-info' \
--exclude='.dockerignore' \
--exclude='.zipignore' \
./ "$TEMP_DIR/gnx-web/"
# Create zip
echo ""
echo "🗜️ Creating zip file..."
cd "$TEMP_DIR"
zip -r "$ZIP_NAME" gnx-web/ > /dev/null
# Move to original directory
mv "$ZIP_NAME" "$OLDPWD/"
# Cleanup
cd "$OLDPWD"
rm -rf "$TEMP_DIR"
echo "✅ Deployment package created: $ZIP_NAME"
echo ""
echo "📋 File size: $(du -h "$ZIP_NAME" | cut -f1)"
echo ""
echo "📤 Ready to upload to server!"

303
deploy.sh Executable file
View File

@@ -0,0 +1,303 @@
#!/bin/bash
# GNX-WEB Complete Deployment Script
# This script sets up and deploys the entire application
# Abort immediately if any command fails.
set -e
# Colors for output (ANSI escape codes; NC resets to the terminal default).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Get script directory (resolved absolutely, so the script works from any CWD).
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BACKEND_DIR="$SCRIPT_DIR/backEnd"
FRONTEND_DIR="$SCRIPT_DIR/frontEnd"
# Function to generate secure random key.
#   $1 = requested key length in characters.
# Prefers Python's secrets module; falls back to openssl when python3 is
# unavailable. The openssl path converts the char count to a byte count
# (base64 expands 3 bytes -> 4 chars) and trims to exactly $1 characters.
# NOTE(review): the python path passes $1 as *bytes* to token_urlsafe, which
# yields ~4/3*$1 characters untrimmed — the two paths produce different
# lengths. Harmless for secrets, but confirm if exact length ever matters.
generate_secret_key() {
    python3 -c "import secrets; print(secrets.token_urlsafe($1))" 2>/dev/null || \
    openssl rand -base64 $((($1 * 3) / 4)) | tr -d '\n' | head -c $1
}
# Function to update .env file with generated keys.
#   $1 = path to the .env file
#   $2 = Django SECRET_KEY value
#   $3 = INTERNAL_API_KEY value
# Idempotent: each variable is replaced in place when present, appended when
# missing. Also rewrites STATIC_ROOT/MEDIA_ROOT to this checkout's paths.
update_env_file() {
    local env_file="$1"
    local secret_key="$2"
    local api_key="$3"
    # Update SECRET_KEY (replace-or-append).
    if grep -q "^SECRET_KEY=" "$env_file"; then
        # '|' as the sed delimiter avoids clashing with '/' in key values.
        sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$secret_key|" "$env_file"
    else
        echo "SECRET_KEY=$secret_key" >> "$env_file"
    fi
    # Update INTERNAL_API_KEY (same replace-or-append pattern).
    if grep -q "^INTERNAL_API_KEY=" "$env_file"; then
        sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$api_key|" "$env_file"
    else
        echo "INTERNAL_API_KEY=$api_key" >> "$env_file"
    fi
    # Update STATIC_ROOT and MEDIA_ROOT paths.
    # NOTE(review): relies on $BACKEND_DIR from the enclosing script scope, and
    # unlike the keys above these are replace-only (no append when missing).
    sed -i "s|^STATIC_ROOT=.*|STATIC_ROOT=$BACKEND_DIR/staticfiles|" "$env_file"
    sed -i "s|^MEDIA_ROOT=.*|MEDIA_ROOT=$BACKEND_DIR/media|" "$env_file"
}
# Function to update nginx config with API key.
#   $1 = nginx vhost config path
#   $2 = INTERNAL_API_KEY value
update_nginx_config() {
    local nginx_config="$1"
    local api_key="$2"
    # Escape special characters in API key for sed.
    # NOTE(review): this escapes regex metacharacters, but the key is used on
    # the *replacement* side of s|...|...|, where only '\', '&' and the
    # delimiter are special. Harmless for token_urlsafe output ([A-Za-z0-9_-]);
    # revisit if the key alphabet ever changes.
    local escaped_key=$(echo "$api_key" | sed 's/[[\.*^$()+?{|]/\\&/g')
    # Update API key in both /api/ and /admin/ locations — the /g flag rewrites
    # every `set $api_key "...";` occurrence in the file.
    sudo sed -i "s|set \$api_key \".*\";|set \$api_key \"$escaped_key\";|g" "$nginx_config"
}
echo -e "${BLUE}=========================================="
echo "GNX-WEB Deployment Script"
echo "==========================================${NC}"
echo ""
# Check if running as root for system-level operations.
# Not fatal: individual commands use sudo and will prompt as needed.
if [ "$EUID" -ne 0 ]; then
    echo -e "${YELLOW}Note: Some operations require root privileges${NC}"
    echo -e "${YELLOW}You may be prompted for sudo password${NC}"
    echo ""
fi
# Generate secure keys once up front; written to .env and nginx config below.
echo -e "${GREEN}[0/8] Generating secure keys...${NC}"
SECRET_KEY=$(generate_secret_key 50)
INTERNAL_API_KEY=$(generate_secret_key 32)
echo -e "${GREEN}✓ Generated SECRET_KEY${NC}"
echo -e "${GREEN}✓ Generated INTERNAL_API_KEY${NC}"
echo ""
# Step 1: Install PostgreSQL (delegated to a sibling script; hard failure if absent).
echo -e "${GREEN}[1/8] Installing PostgreSQL...${NC}"
if [ -f "$SCRIPT_DIR/install-postgresql.sh" ]; then
    sudo bash "$SCRIPT_DIR/install-postgresql.sh"
else
    echo -e "${RED}Error: install-postgresql.sh not found${NC}"
    exit 1
fi
# Step 2: Setup Backend (venv, pip deps, .env with the generated keys).
echo -e "${GREEN}[2/8] Setting up Backend...${NC}"
cd "$BACKEND_DIR"
# Create virtual environment if it doesn't exist.
if [ ! -d "venv" ]; then
    echo -e "${BLUE}Creating Python virtual environment...${NC}"
    python3 -m venv venv
fi
# Activate virtual environment (affects this shell for pip/manage.py below).
source venv/bin/activate
# Install Python dependencies.
echo -e "${BLUE}Installing Python dependencies...${NC}"
pip install --upgrade pip
pip install -r requirements.txt
# Create .env file if it doesn't exist.
if [ ! -f ".env" ]; then
    echo -e "${BLUE}Creating .env file from production.env.example...${NC}"
    cp production.env.example .env
fi
# Update .env file with generated keys and paths.
echo -e "${BLUE}Updating .env file with generated keys...${NC}"
update_env_file ".env" "$SECRET_KEY" "$INTERNAL_API_KEY"
echo -e "${GREEN}✓ Updated .env file with generated keys${NC}"
# Check if critical values still need to be updated.
# Pauses (plain `read`) so the operator can acknowledge before continuing.
if grep -q "your_password_here\|your-email\|your-server-ip" .env; then
    echo -e "${YELLOW}⚠ Some values in .env still need to be updated:${NC}"
    echo -e "${YELLOW}  - DATABASE_URL (database password)${NC}"
    echo -e "${YELLOW}  - Email settings${NC}"
    echo -e "${YELLOW}  - ALLOWED_HOSTS (server IP/domain)${NC}"
    echo -e "${YELLOW}  - ADMIN_ALLOWED_IPS${NC}"
    echo ""
    echo -e "${YELLOW}Press Enter to continue (you can update these later)...${NC}"
    read
fi
# Create necessary directories (idempotent).
mkdir -p logs media staticfiles
# Step 3: Setup Database — manual step: prints the SQL, waits for operator.
echo -e "${GREEN}[3/8] Setting up Database...${NC}"
echo -e "${YELLOW}Make sure PostgreSQL is running and database is created${NC}"
echo -e "${YELLOW}Run these commands if needed:${NC}"
echo "  sudo -u postgres psql"
echo "  CREATE DATABASE gnx_db;"
echo "  CREATE USER gnx_user WITH PASSWORD 'your_password';"
echo "  GRANT ALL PRIVILEGES ON DATABASE gnx_db TO gnx_user;"
echo ""
echo -e "${YELLOW}Press Enter to continue after database is ready...${NC}"
read
# Run migrations (uses the venv python activated above).
echo -e "${BLUE}Running database migrations...${NC}"
python manage.py migrate --noinput
# Collect static files.
echo -e "${BLUE}Collecting static files...${NC}"
python manage.py collectstatic --noinput
# Step 4: Setup Frontend (npm deps, .env.production, production build).
echo -e "${GREEN}[4/8] Setting up Frontend...${NC}"
cd "$FRONTEND_DIR"
# Install Node.js dependencies (skipped when node_modules already exists).
if [ ! -d "node_modules" ]; then
    echo -e "${BLUE}Installing Node.js dependencies...${NC}"
    npm install
fi
# Create .env.production if it doesn't exist; otherwise normalize PORT/NODE_ENV.
if [ ! -f ".env.production" ]; then
    echo -e "${BLUE}Creating .env.production file...${NC}"
    cat > .env.production << EOF
NEXT_PUBLIC_SITE_URL=https://gnxsoft.com
NEXT_PUBLIC_API_URL=
PORT=1087
NODE_ENV=production
NEXT_TELEMETRY_DISABLED=1
EOF
    echo -e "${GREEN}✓ Created .env.production${NC}"
else
    # Update PORT if it exists but is different (replace-or-append).
    if ! grep -q "^PORT=1087" .env.production; then
        echo -e "${BLUE}Updating PORT in .env.production...${NC}"
        if grep -q "^PORT=" .env.production; then
            sed -i "s|^PORT=.*|PORT=1087|" .env.production
        else
            echo "PORT=1087" >> .env.production
        fi
        echo -e "${GREEN}✓ Updated PORT in .env.production${NC}"
    fi
    # Ensure NODE_ENV is set to production (replace-or-append).
    if ! grep -q "^NODE_ENV=production" .env.production; then
        if grep -q "^NODE_ENV=" .env.production; then
            sed -i "s|^NODE_ENV=.*|NODE_ENV=production|" .env.production
        else
            echo "NODE_ENV=production" >> .env.production
        fi
    fi
fi
# Build frontend for production (env vars override any stale .env values).
echo -e "${BLUE}Building frontend for production...${NC}"
NODE_ENV=production PORT=1087 npm run build
# Step 5: Install PM2 (process manager for the Node frontend).
echo -e "${GREEN}[5/8] Installing PM2...${NC}"
if ! command -v pm2 &> /dev/null; then
    echo -e "${BLUE}Installing PM2 globally...${NC}"
    sudo npm install -g pm2
    # NOTE(review): if this script runs under `sudo bash`, $USER/$HOME here are
    # root's — confirm the intended service account before enabling on boot.
    pm2 startup systemd -u $USER --hp $HOME
    echo -e "${YELLOW}Please run the command shown above to enable PM2 on boot${NC}"
else
    echo -e "${GREEN}PM2 is already installed${NC}"
fi
# Step 6: Configure Firewall — rules are staged only; ufw is NOT enabled here.
echo -e "${GREEN}[6/8] Configuring Firewall...${NC}"
if command -v ufw &> /dev/null; then
    echo -e "${BLUE}Configuring UFW firewall...${NC}"
    sudo ufw allow 80/tcp comment 'HTTP'
    sudo ufw allow 443/tcp comment 'HTTPS'
    # Backend/frontend/DB ports stay internal; only nginx is publicly reachable.
    sudo ufw deny 1086/tcp comment 'Backend - Internal Only'
    sudo ufw deny 1087/tcp comment 'Frontend - Internal Only'
    sudo ufw deny 5433/tcp comment 'PostgreSQL - Internal Only'
    echo -e "${YELLOW}Firewall rules configured. Enable with: sudo ufw enable${NC}"
else
    echo -e "${YELLOW}UFW not found. Please configure firewall manually${NC}"
fi
# Step 7: Setup Nginx (copy vhost, patch paths + API key, enable, validate).
echo -e "${GREEN}[7/8] Setting up Nginx...${NC}"
if command -v nginx &> /dev/null; then
    echo -e "${BLUE}Copying nginx configuration...${NC}"
    sudo cp "$SCRIPT_DIR/nginx-gnxsoft.conf" /etc/nginx/sites-available/gnxsoft
    # Update paths in nginx config (replaces the template's hard-coded checkout path).
    sudo sed -i "s|/home/gnx/Desktop/GNX-WEB|$SCRIPT_DIR|g" /etc/nginx/sites-available/gnxsoft
    # Update INTERNAL_API_KEY in nginx config so it matches backEnd/.env.
    echo -e "${BLUE}Updating nginx configuration with INTERNAL_API_KEY...${NC}"
    update_nginx_config "/etc/nginx/sites-available/gnxsoft" "$INTERNAL_API_KEY"
    echo -e "${GREEN}✓ Updated nginx config with INTERNAL_API_KEY${NC}"
    # Enable site (symlink into sites-enabled, idempotent).
    if [ ! -L /etc/nginx/sites-enabled/gnxsoft ]; then
        sudo ln -s /etc/nginx/sites-available/gnxsoft /etc/nginx/sites-enabled/
    fi
    # Remove default nginx site if it exists.
    if [ -L /etc/nginx/sites-enabled/default ]; then
        sudo rm /etc/nginx/sites-enabled/default
    fi
    # Test nginx configuration — validation only; reload stays a manual step.
    echo -e "${BLUE}Testing nginx configuration...${NC}"
    if sudo nginx -t; then
        echo -e "${GREEN}✓ Nginx configuration is valid${NC}"
    else
        echo -e "${RED}✗ Nginx configuration has errors${NC}"
        echo -e "${YELLOW}Please check the configuration manually${NC}"
    fi
    echo -e "${YELLOW}Nginx configured. Reload with: sudo systemctl reload nginx${NC}"
else
    echo -e "${RED}Nginx not found. Please install nginx first${NC}"
fi
# Step 8: Start Services (delegated to a sibling script; hard failure if absent).
echo -e "${GREEN}[8/8] Starting Services...${NC}"
if [ -f "$SCRIPT_DIR/start-services.sh" ]; then
    bash "$SCRIPT_DIR/start-services.sh"
else
    echo -e "${RED}Error: start-services.sh not found${NC}"
    exit 1
fi
echo ""
echo -e "${GREEN}=========================================="
echo "Deployment Complete!"
echo "==========================================${NC}"
echo ""
# Only key prefixes are echoed — full values live in backEnd/.env and nginx.
echo -e "${BLUE}Generated Keys (saved to backEnd/.env and nginx config):${NC}"
echo -e "${GREEN}✓ SECRET_KEY: ${SECRET_KEY:0:20}...${NC}"
echo -e "${GREEN}✓ INTERNAL_API_KEY: ${INTERNAL_API_KEY:0:20}...${NC}"
echo ""
echo -e "${BLUE}Next Steps:${NC}"
echo "1. Update backEnd/.env with remaining configuration:"
echo "   - DATABASE_URL (database credentials)"
echo "   - Email settings (SMTP configuration)"
echo "   - ALLOWED_HOSTS (your domain and server IP)"
echo "   - ADMIN_ALLOWED_IPS (your admin IP address)"
echo "2. Create PostgreSQL database and user (if not done)"
echo "3. Run: sudo systemctl reload nginx"
echo "4. Run: sudo ufw enable (to enable firewall)"
echo "5. Check services: pm2 status"
echo "6. View logs: pm2 logs"
echo ""
echo -e "${BLUE}Service URLs:${NC}"
echo "  Backend:  http://127.0.0.1:1086"
echo "  Frontend: http://127.0.0.1:1087"
echo "  Public:   https://gnxsoft.com (via nginx)"
echo ""
echo -e "${GREEN}Note: Keys have been automatically generated and configured!${NC}"
echo ""

View File

@@ -1,98 +0,0 @@
version: '3.8'
services:
postgres:
image: postgres:16-alpine
container_name: gnx-postgres
restart: unless-stopped
environment:
- POSTGRES_DB=${POSTGRES_DB:-gnxdb}
- POSTGRES_USER=${POSTGRES_USER:-gnx}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-change-this-password}
volumes:
- postgres_data:/var/lib/postgresql/data
networks:
- gnx-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-gnx}"]
interval: 10s
timeout: 5s
retries: 5
backend:
build:
context: ./backEnd
dockerfile: Dockerfile
container_name: gnx-backend
restart: unless-stopped
ports:
- "1086:1086"
env_file:
- .env.production
environment:
- DEBUG=False
- SECRET_KEY=${SECRET_KEY:-change-this-in-production}
- ALLOWED_HOSTS=${ALLOWED_HOSTS:-localhost,127.0.0.1,backend}
- DATABASE_URL=${DATABASE_URL:-postgresql://${POSTGRES_USER:-gnx}:${POSTGRES_PASSWORD:-change-this-password}@postgres:5432/${POSTGRES_DB:-gnxdb}}
- ADMIN_ALLOWED_IPS=${ADMIN_ALLOWED_IPS:-193.194.155.249}
- INTERNAL_API_KEY=${INTERNAL_API_KEY}
- EMAIL_BACKEND=${EMAIL_BACKEND:-django.core.mail.backends.console.EmailBackend}
- EMAIL_HOST=${EMAIL_HOST}
- EMAIL_PORT=${EMAIL_PORT:-587}
- EMAIL_USE_TLS=${EMAIL_USE_TLS:-True}
- EMAIL_HOST_USER=${EMAIL_HOST_USER}
- EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD}
- DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL:-noreply@gnxsoft.com}
- COMPANY_EMAIL=${COMPANY_EMAIL:-contact@gnxsoft.com}
volumes:
- ./backEnd/media:/app/media
- ./backEnd/staticfiles:/app/staticfiles
- ./backEnd/logs:/app/logs
depends_on:
postgres:
condition: service_healthy
networks:
- gnx-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:1086/admin/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
frontend:
build:
context: ./frontEnd
dockerfile: Dockerfile
container_name: gnx-frontend
restart: unless-stopped
ports:
- "1087:1087"
env_file:
- .env.production
environment:
- NODE_ENV=production
- DOCKER_ENV=true
- NEXT_PUBLIC_API_URL=http://backend:1086
- PORT=1087
depends_on:
- backend
networks:
- gnx-network
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:1087/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
gnx-network:
driver: bridge
volumes:
postgres_data:
driver: local
media:
staticfiles:

View File

@@ -1,240 +0,0 @@
#!/bin/bash
# Docker startup script for GNX Web Application
# This script handles automatic setup, permissions, and startup:
#   1. fixes file/directory permissions,
#   2. loads .env.production,
#   3. installs and reloads the nginx site config,
#   4. builds and starts the docker-compose stack,
#   5. runs a one-time SQLite -> PostgreSQL data migration if needed,
#   6. runs Django migrations and collectstatic.
set -e

echo "🚀 Starting GNX Web Application..."
echo ""

# ---------------------------------------------------------------------------
# Permissions
# ---------------------------------------------------------------------------
echo "🔧 Setting up permissions..."

# Make helper scripts executable (ignore failures on read-only checkouts).
chmod +x docker-start.sh 2>/dev/null || true
chmod +x migrate-data.sh 2>/dev/null || true
chmod +x migrate-sqlite-to-postgres.sh 2>/dev/null || true

# Directories used by the compose bind mounts must exist before `up`.
mkdir -p backEnd/media backEnd/staticfiles backEnd/logs backups
chmod 755 backEnd/media backEnd/staticfiles backEnd/logs backups 2>/dev/null || true

# Legacy SQLite database (read by the one-time migration below).
if [ -f "backEnd/db.sqlite3" ]; then
    chmod 644 backEnd/db.sqlite3 2>/dev/null || true
fi

# The env file holds secrets — restrict it to the owner.
if [ -f ".env.production" ]; then
    chmod 600 .env.production 2>/dev/null || true
fi
echo "✅ Permissions set"
echo ""

# ---------------------------------------------------------------------------
# Environment
# ---------------------------------------------------------------------------
if [ ! -f .env.production ]; then
    echo "⚠️ Warning: .env.production not found. Creating from example..."
    if [ -f .env.production.example ]; then
        cp .env.production.example .env.production
        echo "📝 Please edit .env.production with your actual values before continuing."
        exit 1
    else
        echo "❌ Error: .env.production.example not found!"
        exit 1
    fi
fi

# Load environment variables.
# FIX: the previous `export $(cat .env.production | grep -v '^#' | xargs)`
# broke on values containing spaces, quotes or '='. `set -a` exports every
# variable assigned while sourcing and handles quoting correctly.
set -a
# shellcheck disable=SC1091
. ./.env.production
set +a

# ---------------------------------------------------------------------------
# Nginx
# ---------------------------------------------------------------------------
echo "🔧 Configuring Nginx..."

NGINX_AVAILABLE="/etc/nginx/sites-available/gnxsoft"
NGINX_ENABLED="/etc/nginx/sites-enabled/gnxsoft"
NGINX_CONF="nginx.conf"

# The repo-local nginx.conf is the source of truth for the site config.
if [ ! -f "$NGINX_CONF" ]; then
    echo "❌ Error: nginx.conf not found in current directory!"
    exit 1
fi

# Backup the currently installed config before overwriting it.
if [ -f "$NGINX_AVAILABLE" ]; then
    echo "📦 Backing up existing nginx config..."
    sudo cp "$NGINX_AVAILABLE" "${NGINX_AVAILABLE}.backup.$(date +%Y%m%d_%H%M%S)"
    echo "✅ Old config backed up"
fi

if [ -L "$NGINX_ENABLED" ]; then
    echo "🔗 Removing old symlink..."
    sudo rm -f "$NGINX_ENABLED"
fi

# Remove any stale gnxsoft* configs so only the canonical pair remains.
for file in /etc/nginx/sites-available/gnxsoft* /etc/nginx/sites-enabled/gnxsoft*; do
    if [ -f "$file" ] || [ -L "$file" ]; then
        if [ "$file" != "$NGINX_AVAILABLE" ] && [ "$file" != "$NGINX_ENABLED" ]; then
            echo "🗑️ Removing old config: $file"
            sudo rm -f "$file"
        fi
    fi
done

echo "📋 Installing new nginx configuration..."
sudo cp "$NGINX_CONF" "$NGINX_AVAILABLE"

echo "🔗 Creating symlink..."
sudo ln -sf "$NGINX_AVAILABLE" "$NGINX_ENABLED"

# Rewrite the hard-coded development path to wherever this checkout lives.
CURRENT_DIR=$(pwd)
echo "📝 Updating paths in nginx config..."
sudo sed -i "s|/home/gnx/Desktop/GNX-WEB|$CURRENT_DIR|g" "$NGINX_AVAILABLE"

# Generate an INTERNAL_API_KEY on first run (shared nginx <-> backend secret).
if [ -z "$INTERNAL_API_KEY" ] || [ "$INTERNAL_API_KEY" = "your-generated-key-here" ]; then
    echo "🔑 Generating new INTERNAL_API_KEY..."
    INTERNAL_API_KEY=$(python3 -c "import secrets; print(secrets.token_urlsafe(32))" 2>/dev/null || openssl rand -base64 32 | tr -d "=+/" | cut -c1-32)
    # Persist the generated key so subsequent runs reuse it.
    if [ -f .env.production ]; then
        if grep -q "INTERNAL_API_KEY=" .env.production; then
            sed -i "s|INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" .env.production
        else
            echo "INTERNAL_API_KEY=$INTERNAL_API_KEY" >> .env.production
        fi
        echo "✅ Updated .env.production with generated INTERNAL_API_KEY"
    fi
    # Export for use in this script
    export INTERNAL_API_KEY
fi

echo "🔑 Setting INTERNAL_API_KEY in nginx config..."
sudo sed -i "s|PLACEHOLDER_INTERNAL_API_KEY|$INTERNAL_API_KEY|g" "$NGINX_AVAILABLE"
echo "✅ INTERNAL_API_KEY configured in nginx"

echo "🧪 Testing nginx configuration..."
if sudo nginx -t; then
    echo "✅ Nginx configuration is valid"
    echo "🔄 Reloading nginx..."
    sudo systemctl reload nginx
    echo "✅ Nginx reloaded successfully"
else
    echo "❌ Nginx configuration test failed!"
    echo "⚠️ Please check the configuration manually"
    exit 1
fi

# ---------------------------------------------------------------------------
# Containers
# ---------------------------------------------------------------------------
echo "🔨 Building Docker images..."
docker-compose build

echo "▶️ Starting containers..."
docker-compose up -d

echo "⏳ Waiting for services to start..."
sleep 10

# Wait for PostgreSQL when the stack is configured to use it.
if echo "$DATABASE_URL" | grep -q "postgresql://"; then
    echo "⏳ Waiting for PostgreSQL to be ready..."
    timeout=30
    while [ $timeout -gt 0 ]; do
        if docker-compose exec -T postgres pg_isready -U "${POSTGRES_USER:-gnx}" > /dev/null 2>&1; then
            echo "✅ PostgreSQL is ready"
            break
        fi
        echo " Waiting for PostgreSQL... ($timeout seconds remaining)"
        sleep 2
        timeout=$((timeout - 2))
    done
    if [ $timeout -le 0 ]; then
        echo "⚠️ Warning: PostgreSQL may not be ready, but continuing..."
    fi

    # One-time SQLite -> PostgreSQL migration on first deployment.
    if [ -f "./backEnd/db.sqlite3" ] && [ ! -f ".migrated_to_postgres" ]; then
        echo ""
        echo "🔄 SQLite database detected. Checking if migration is needed..."

        # Number of non-Django tables already present in PostgreSQL.
        POSTGRES_TABLES=$(docker-compose exec -T backend python manage.py shell -c "
from django.db import connection
cursor = connection.cursor()
cursor.execute(\"SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name NOT LIKE 'django_%'\")
print(cursor.fetchone()[0])
" 2>/dev/null | tail -1 || echo "0")

        # Whether the legacy SQLite database contains any application rows.
        SQLITE_HAS_DATA=$(docker-compose exec -T backend bash -c "
export DATABASE_URL=sqlite:///db.sqlite3
python manage.py shell -c \"
from django.contrib.auth.models import User
from django.db import connection
cursor = connection.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type=\"table\" AND name NOT LIKE \"sqlite_%\" AND name NOT LIKE \"django_%\"')
tables = cursor.fetchall()
has_data = False
for table in tables:
    cursor.execute(f'SELECT COUNT(*) FROM {table[0]}')
    if cursor.fetchone()[0] > 0:
        has_data = True
        break
print('1' if has_data else '0')
\" 2>/dev/null
" | tail -1 || echo "0")

        # Guard against non-numeric output (e.g. the shell command failed),
        # which would make the numeric -lt test below error out under set -e.
        case "$POSTGRES_TABLES" in
            ''|*[!0-9]*) POSTGRES_TABLES=0 ;;
        esac

        # FIX: the original test `A && B || C` is parsed by the shell as
        # `(A && B) || C`, so migration could start even when SQLite was
        # empty. The intended condition is: SQLite has data AND PostgreSQL
        # is (essentially) empty.
        if [ "$SQLITE_HAS_DATA" = "1" ] && { [ "$POSTGRES_TABLES" = "0" ] || [ "$POSTGRES_TABLES" -lt 5 ]; }; then
            echo "📦 SQLite database has data. Starting migration to PostgreSQL..."
            echo " This may take a few minutes..."
            echo ""
            # Run migration script
            if [ -f "./migrate-sqlite-to-postgres.sh" ]; then
                ./migrate-sqlite-to-postgres.sh
            else
                echo "⚠️ Migration script not found. Please run manually:"
                echo " ./migrate-sqlite-to-postgres.sh"
            fi
        else
            echo "✅ No migration needed (PostgreSQL already has data or SQLite is empty)"
            touch .migrated_to_postgres
        fi
    fi
fi

# ---------------------------------------------------------------------------
# Django housekeeping
# ---------------------------------------------------------------------------
echo "📦 Running database migrations..."
docker-compose exec -T backend python manage.py migrate --noinput

echo "📁 Collecting static files..."
docker-compose exec -T backend python manage.py collectstatic --noinput

echo "🏥 Checking service health..."
docker-compose ps

echo ""
echo "✅ GNX Web Application is running!"
echo ""
echo "Backend: http://localhost:1086"
echo "Frontend: http://localhost:1087"
echo "Nginx: Configured and running"
echo ""
echo "View logs: docker-compose logs -f"
echo "Stop services: docker-compose down"
echo ""
echo "📋 Nginx config location: $NGINX_AVAILABLE"

View File

@@ -1,26 +0,0 @@
# Docker build-context excludes for the Next.js frontend image.
# Keeping these out of the context speeds up builds and avoids leaking
# local secrets (.env*) into image layers.

# Dependencies and build output (reinstalled / rebuilt inside the image)
node_modules
.next

# Version control
.git
.gitignore

# Logs
*.log

# Environment files — never bake secrets into the image
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# Package-manager debug logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# OS / editor artifacts
.DS_Store
.vscode
.idea
*.swp
*.swo
*~

# Test and coverage output
coverage
.nyc_output

# Other build artifacts
dist
build

# Documentation (not needed at runtime)
README.md
*.md

View File

@@ -1,50 +0,0 @@
# Next.js Frontend Dockerfile
# Three-stage build: deps (install) -> builder (compile) -> runner (serve).
# Only the standalone output lands in the final image, keeping it small.
FROM node:20-alpine AS base

# Stage 1: install dependencies only when package files change (layer cache).
FROM base AS deps
# libc6-compat is commonly needed by native Node modules on Alpine (musl).
RUN apk add --no-cache libc6-compat
WORKDIR /app
# Copy package files
COPY package*.json ./
# `npm ci` gives reproducible installs from package-lock.json.
RUN npm ci

# Stage 2: rebuild the source code only when sources change.
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
# Set environment variables for build
ENV NEXT_TELEMETRY_DISABLED=1
ENV NODE_ENV=production
# Build Next.js (produces .next/standalone thanks to `output: 'standalone'`).
RUN npm run build

# Stage 3: minimal production image — standalone server only, run as non-root.
FROM base AS runner
WORKDIR /app
ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1
# Dedicated unprivileged user/group for the runtime process.
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
# Copy necessary files from builder
COPY --from=builder /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
USER nextjs
# Matches the host port mapping (1087:1087) in docker-compose.
EXPOSE 1087
ENV PORT=1087
ENV HOSTNAME="0.0.0.0"
# Use the standalone server emitted by the Next.js build.
CMD ["node", "server.js"]

View File

@@ -19,6 +19,11 @@ interface ServicePageProps {
}>; }>;
} }
// Force static generation - pages are pre-rendered at build time
export const dynamic = 'force-static';
export const dynamicParams = false; // Return 404 for unknown slugs
export const revalidate = false; // Never revalidate - fully static
// Generate static params for all services (optional - for better performance) // Generate static params for all services (optional - for better performance)
export async function generateStaticParams() { export async function generateStaticParams() {
try { try {
@@ -27,6 +32,7 @@ export async function generateStaticParams() {
slug: service.slug, slug: service.slug,
})); }));
} catch (error) { } catch (error) {
console.error('Error generating static params for services:', error);
return []; return [];
} }
} }

View File

@@ -28,7 +28,8 @@ const SupportCenterPage = () => {
url: "/support-center", url: "/support-center",
}); });
document.title = metadata.title || "Support Center | GNX Soft"; const titleString = typeof metadata.title === 'string' ? metadata.title : "Support Center | GNX Soft";
document.title = titleString;
let metaDescription = document.querySelector('meta[name="description"]'); let metaDescription = document.querySelector('meta[name="description"]');
if (!metaDescription) { if (!metaDescription) {

View File

@@ -12,6 +12,8 @@ const Process = ({ slug }: ProcessProps) => {
return null; return null;
} }
const processSteps = caseStudy.process_steps;
return ( return (
<section className="case-study-process luxury-process pt-120 pb-120"> <section className="case-study-process luxury-process pt-120 pb-120">
<div className="container"> <div className="container">
@@ -28,7 +30,7 @@ const Process = ({ slug }: ProcessProps) => {
</div> </div>
<div className="col-12 col-lg-7"> <div className="col-12 col-lg-7">
<div className="process-steps-list"> <div className="process-steps-list">
{caseStudy.process_steps.map((step, index) => ( {processSteps.map((step, index) => (
<div key={step.id} className="process-step-item"> <div key={step.id} className="process-step-item">
<div className="step-number"> <div className="step-number">
{String(step.step_number).padStart(2, '0')} {String(step.step_number).padStart(2, '0')}
@@ -37,7 +39,7 @@ const Process = ({ slug }: ProcessProps) => {
<h4 className="step-title">{step.title}</h4> <h4 className="step-title">{step.title}</h4>
<p className="step-description">{step.description}</p> <p className="step-description">{step.description}</p>
</div> </div>
{index < caseStudy.process_steps.length - 1 && ( {index < processSteps.length - 1 && (
<div className="step-connector"></div> <div className="step-connector"></div>
)} )}
</div> </div>

View File

@@ -27,7 +27,7 @@ const KnowledgeBase = () => {
const filtered = allArticles.filter(article => const filtered = allArticles.filter(article =>
article.title.toLowerCase().includes(searchTerm.toLowerCase()) || article.title.toLowerCase().includes(searchTerm.toLowerCase()) ||
article.summary.toLowerCase().includes(searchTerm.toLowerCase()) || article.summary.toLowerCase().includes(searchTerm.toLowerCase()) ||
article.content.toLowerCase().includes(searchTerm.toLowerCase()) (article.content && article.content.toLowerCase().includes(searchTerm.toLowerCase()))
); );
return { return {
displayArticles: filtered, displayArticles: filtered,

View File

@@ -70,7 +70,6 @@ const SmoothScroll = () => {
gestureOrientation: 'vertical', gestureOrientation: 'vertical',
smoothWheel: true, smoothWheel: true,
wheelMultiplier: 1, wheelMultiplier: 1,
smoothTouch: false,
touchMultiplier: 2, touchMultiplier: 2,
infinite: false, infinite: false,
}); });

View File

@@ -1,4 +1,4 @@
import { API_CONFIG } from '../config/api'; import { API_CONFIG, getApiHeaders } from '../config/api';
// Types for Service API // Types for Service API
export interface ServiceFeature { export interface ServiceFeature {
@@ -104,9 +104,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -134,9 +132,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -164,9 +160,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -194,9 +188,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -224,9 +216,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -254,9 +244,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -284,9 +272,7 @@ export const serviceService = {
const response = await fetch(url, { const response = await fetch(url, {
method: 'GET', method: 'GET',
headers: { headers: getApiHeaders(),
'Content-Type': 'application/json',
},
}); });
if (!response.ok) { if (!response.ok) {
@@ -442,21 +428,30 @@ export const serviceUtils = {
}, },
// Get service image URL // Get service image URL
// Use relative URLs for same-domain images (Next.js can optimize via rewrites)
// Use absolute URLs only for external images
getServiceImageUrl: (service: Service): string => { getServiceImageUrl: (service: Service): string => {
// If service has an uploaded image // If service has an uploaded image
if (service.image && typeof service.image === 'string' && service.image.startsWith('/media/')) { if (service.image && typeof service.image === 'string' && service.image.startsWith('/media/')) {
return `${API_CONFIG.BASE_URL}${service.image}`; // Use relative URL - Next.js rewrite will handle fetching from backend during optimization
return service.image;
} }
// If service has an image_url // If service has an image_url
if (service.image_url) { if (service.image_url) {
if (service.image_url.startsWith('http')) { if (service.image_url.startsWith('http')) {
// External URL - keep as absolute
return service.image_url; return service.image_url;
} }
return `${API_CONFIG.BASE_URL}${service.image_url}`; if (service.image_url.startsWith('/media/')) {
// Same domain media - use relative URL
return service.image_url;
}
// Other relative URLs
return service.image_url;
} }
// Fallback to default image // Fallback to default image (relative is fine for public images)
return '/images/service/default.png'; return '/images/service/default.png';
}, },

View File

@@ -6,17 +6,62 @@
* In Production: Uses Next.js rewrites/nginx proxy at /api (internal network only) * In Production: Uses Next.js rewrites/nginx proxy at /api (internal network only)
*/ */
// Production: Use relative URLs (nginx proxy) // Production: Use relative URLs (nginx proxy) for client-side
// Development: Use full backend URL // For server-side (SSR), use internal backend URL or public domain
// Docker: Use backend service name or port 1086
const isProduction = process.env.NODE_ENV === 'production'; const isProduction = process.env.NODE_ENV === 'production';
const isDocker = process.env.DOCKER_ENV === 'true';
export const API_BASE_URL = isDocker // Detect if we're on the server (Node.js) or client (browser)
? (process.env.NEXT_PUBLIC_API_URL || 'http://backend:1086') const isServer = typeof window === 'undefined';
: isProduction
? '' // Use relative URLs in production (proxied by nginx) // For server-side rendering, we need an absolute URL
: (process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8000'); // During build time, use internal backend URL directly (faster, no SSL issues)
// At runtime, use public domain (goes through nginx which adds API key header)
/**
 * Resolve the absolute API base URL for server-side (Node.js) execution.
 *
 * Production build phase -> internal backend URL (direct, fast, no TLS).
 * Production runtime SSR -> public site URL (nginx proxies the request and
 *                           attaches the internal API key header itself).
 * Development            -> direct backend URL.
 */
const getServerApiUrl = () => {
  if (!isProduction) {
    return process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086';
  }

  // NEXT_PHASE is set by Next.js during `next build`; NEXT_RUNTIME is only
  // defined once the server is actually running, so its absence is also
  // treated as build time.
  const buildingNow =
    process.env.NEXT_PHASE === 'phase-production-build' ||
    !process.env.NEXT_RUNTIME;

  return buildingNow
    ? process.env.INTERNAL_API_URL || 'http://127.0.0.1:1086'
    : process.env.NEXT_PUBLIC_SITE_URL || 'https://gnxsoft.com';
};
// For client-side, use relative URLs in production (proxied by nginx)
// For server-side, use absolute URLs
export const API_BASE_URL = isServer
? getServerApiUrl() // Server-side: absolute URL
: (isProduction
? '' // Client-side production: relative URLs (proxied by nginx)
: (process.env.NEXT_PUBLIC_API_URL || 'http://127.0.0.1:1086')); // Development: direct backend
// Internal API key for server-side requests (must match backend INTERNAL_API_KEY).
// SECURITY FIX: the previous fallback hard-coded a real key into the repository;
// any secret committed to source control must be considered compromised. We now
// fall back to an empty string, which the backend will reject — surfacing the
// missing INTERNAL_API_KEY env var instead of silently shipping a known key.
export const INTERNAL_API_KEY = process.env.INTERNAL_API_KEY || '';
/**
 * Build the default header set for an API request.
 *
 * Always includes the JSON content type. When running on the server and
 * talking to the internal backend directly (i.e. not through the nginx
 * proxy, which injects the key on its own), the internal API key header
 * is attached as well.
 */
export const getApiHeaders = (): Record<string, string> => {
  const talksToInternalBackend =
    isServer && API_BASE_URL.includes('127.0.0.1:1086');

  const baseHeaders: Record<string, string> = {
    'Content-Type': 'application/json',
  };

  return talksToInternalBackend
    ? { ...baseHeaders, 'X-Internal-API-Key': INTERNAL_API_KEY }
    : baseHeaders;
};
export const API_CONFIG = { export const API_CONFIG = {
// Django API Base URL // Django API Base URL

View File

@@ -1,8 +1,10 @@
/** @type {import('next').NextConfig} */ /** @type {import('next').NextConfig} */
const nextConfig = { const nextConfig = {
// Enable standalone output for Docker // Enable standalone output for optimized production deployment
output: 'standalone', output: 'standalone',
images: { images: {
// Enable image optimization in standalone mode
unoptimized: false,
remotePatterns: [ remotePatterns: [
{ {
protocol: 'http', protocol: 'http',
@@ -33,15 +35,60 @@ const nextConfig = {
hostname: 'images.unsplash.com', hostname: 'images.unsplash.com',
pathname: '/**', pathname: '/**',
}, },
// Add your production domain when ready // Production domain configuration
// { {
// protocol: 'https', protocol: 'https',
// hostname: 'your-api-domain.com', hostname: 'gnxsoft.com',
// pathname: '/media/**', pathname: '/media/**',
// }, },
{
protocol: 'https',
hostname: 'gnxsoft.com',
pathname: '/images/**',
},
{
protocol: 'https',
hostname: 'gnxsoft.com',
pathname: '/_next/static/**',
},
{
protocol: 'http',
hostname: 'gnxsoft.com',
pathname: '/media/**',
},
{
protocol: 'http',
hostname: 'gnxsoft.com',
pathname: '/images/**',
},
{
protocol: 'https',
hostname: 'www.gnxsoft.com',
pathname: '/media/**',
},
{
protocol: 'https',
hostname: 'www.gnxsoft.com',
pathname: '/images/**',
},
{
protocol: 'https',
hostname: 'www.gnxsoft.com',
pathname: '/_next/static/**',
},
{
protocol: 'http',
hostname: 'www.gnxsoft.com',
pathname: '/media/**',
},
{
protocol: 'http',
hostname: 'www.gnxsoft.com',
pathname: '/images/**',
},
], ],
// Legacy domains format for additional compatibility // Legacy domains format for additional compatibility
domains: ['images.unsplash.com'], domains: ['images.unsplash.com', 'gnxsoft.com', 'www.gnxsoft.com'],
formats: ['image/avif', 'image/webp'], formats: ['image/avif', 'image/webp'],
deviceSizes: [640, 750, 828, 1080, 1200, 1920, 2048, 3840], deviceSizes: [640, 750, 828, 1080, 1200, 1920, 2048, 3840],
imageSizes: [16, 32, 48, 64, 96, 128, 256, 384], imageSizes: [16, 32, 48, 64, 96, 128, 256, 384],
@@ -99,7 +146,7 @@ const nextConfig = {
}, },
{ {
key: 'Content-Security-Policy', key: 'Content-Security-Policy',
value: "default-src 'self'; script-src 'self' 'unsafe-eval' 'unsafe-inline' https://www.googletagmanager.com https://www.google-analytics.com; style-src 'self' 'unsafe-inline'; img-src 'self' data: https: http://localhost:8000 http://localhost:8080; font-src 'self' data:; connect-src 'self' http://localhost:8000 https://www.google-analytics.com; frame-src 'self' https://www.google.com; frame-ancestors 'self'; base-uri 'self'; form-action 'self'" value: "default-src 'self'; script-src 'self' 'unsafe-eval' 'unsafe-inline' https://www.googletagmanager.com https://www.google-analytics.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https: http://localhost:8000 http://localhost:8080; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' http://localhost:8000 https://www.google-analytics.com; frame-src 'self' https://www.google.com; frame-ancestors 'self'; base-uri 'self'; form-action 'self'"
}, },
// Performance Headers // Performance Headers
{ {
@@ -153,7 +200,6 @@ const nextConfig = {
// Rewrites for API proxy (Production: routes /api to backend through nginx) // Rewrites for API proxy (Production: routes /api to backend through nginx)
async rewrites() { async rewrites() {
// In development, proxy to Django backend // In development, proxy to Django backend
// In production, nginx handles this
if (process.env.NODE_ENV === 'development') { if (process.env.NODE_ENV === 'development') {
return [ return [
{ {
@@ -166,8 +212,14 @@ const nextConfig = {
}, },
] ]
} }
// In production, these are handled by nginx reverse proxy // In production, add rewrite for media files so Next.js image optimization can access them
return [] // This allows Next.js to fetch media images from the internal backend during optimization
return [
{
source: '/media/:path*',
destination: `${process.env.INTERNAL_API_URL || 'http://127.0.0.1:1086'}/media/:path*`,
},
]
}, },
} }

93
install-postgresql.sh Executable file
View File

@@ -0,0 +1,93 @@
#!/bin/bash
# PostgreSQL Installation and Configuration Script for GNX-WEB
# This script installs PostgreSQL and configures it to use port 5433
# to avoid conflicts with Docker PostgreSQL instance on port 5432
# Must be run as root (sudo); targets Debian/Ubuntu (apt + cluster layout).
set -e
echo "=========================================="
echo "PostgreSQL Installation Script"
echo "=========================================="
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Check if running as root
if [ "$EUID" -ne 0 ]; then
echo -e "${RED}Please run as root (use sudo)${NC}"
exit 1
fi
# Update package list
echo -e "${GREEN}[1/7] Updating package list...${NC}"
apt-get update
# Install PostgreSQL
echo -e "${GREEN}[2/7] Installing PostgreSQL...${NC}"
apt-get install -y postgresql postgresql-contrib
# Get PostgreSQL version
# NOTE: `grep -oP '\d+' | head -1` already yields only the major number
# (e.g. "14" from "psql (PostgreSQL) 14.9"), so the `cut` below is a
# harmless no-op kept for safety.
PG_VERSION=$(psql --version | grep -oP '\d+' | head -1)
PG_MAJOR_VERSION=$(echo $PG_VERSION | cut -d. -f1)
echo -e "${GREEN}[3/7] PostgreSQL version: $PG_VERSION${NC}"
# Find postgresql.conf file (Debian/Ubuntu per-cluster config layout)
PG_CONF="/etc/postgresql/$PG_MAJOR_VERSION/main/postgresql.conf"
if [ ! -f "$PG_CONF" ]; then
echo -e "${RED}Error: Could not find PostgreSQL configuration file${NC}"
exit 1
fi
# Backup original configuration before editing it in place
echo -e "${GREEN}[4/7] Backing up PostgreSQL configuration...${NC}"
cp "$PG_CONF" "${PG_CONF}.backup.$(date +%Y%m%d_%H%M%S)"
# Configure PostgreSQL to use port 5433
# (5432 is occupied by the Docker PostgreSQL container on this host)
echo -e "${GREEN}[5/7] Configuring PostgreSQL to use port 5433...${NC}"
# Check if port is already set
if grep -q "^port = " "$PG_CONF"; then
# Replace existing port setting
sed -i "s/^port = .*/port = 5433/" "$PG_CONF"
else
# Add port setting
echo "port = 5433" >> "$PG_CONF"
fi
# Restart PostgreSQL so the new port takes effect
echo -e "${GREEN}[6/7] Restarting PostgreSQL...${NC}"
systemctl restart postgresql
# Wait for PostgreSQL to start
sleep 2
# Verify PostgreSQL is running on port 5433
# (netstat may be absent on minimal installs, hence the `ss` fallback)
if netstat -tlnp 2>/dev/null | grep -q ":5433" || ss -tlnp 2>/dev/null | grep -q ":5433"; then
echo -e "${GREEN}[7/7] PostgreSQL is running on port 5433${NC}"
else
echo -e "${YELLOW}Warning: Could not verify PostgreSQL is running on port 5433${NC}"
fi
echo ""
echo -e "${GREEN}=========================================="
echo "PostgreSQL Installation Complete!"
echo "==========================================${NC}"
echo ""
echo "Next steps:"
echo "1. Create database and user:"
echo " sudo -u postgres psql"
echo " CREATE DATABASE gnx_db;"
echo " CREATE USER gnx_user WITH PASSWORD 'your_password';"
echo " GRANT ALL PRIVILEGES ON DATABASE gnx_db TO gnx_user;"
echo " \\q"
echo ""
echo "2. Update your .env file with:"
echo " DATABASE_URL=postgresql://gnx_user:your_password@localhost:5433/gnx_db"
echo ""

View File

@@ -1,78 +0,0 @@
#!/bin/bash
# Simplified script to migrate SQLite data to PostgreSQL.
# Exports the legacy SQLite database with Django's dumpdata, runs the
# PostgreSQL migrations, then loads the export into PostgreSQL.
set -e

echo "🔄 Migrating data from SQLite to PostgreSQL..."

# Load environment.
# FIX: `export $(cat ... | grep -v '^#' | xargs)` broke on values containing
# spaces, quotes or '='; `set -a` + sourcing exports every assignment safely.
if [ -f .env.production ]; then
    set -a
    # shellcheck disable=SC1091
    . ./.env.production
    set +a
fi

# Nothing to migrate without the legacy database.
if [ ! -f "./backEnd/db.sqlite3" ]; then
    echo "❌ SQLite database not found"
    exit 1
fi

# Ensure containers are running before exec'ing into them.
if ! docker-compose ps | grep -q "backend.*Up"; then
    echo "▶️ Starting containers..."
    docker-compose up -d
    sleep 10
fi

# Wait for PostgreSQL to accept connections (up to 30 seconds).
echo "⏳ Waiting for PostgreSQL..."
timeout=30
while [ $timeout -gt 0 ]; do
    if docker-compose exec -T postgres pg_isready -U "${POSTGRES_USER:-gnx}" > /dev/null 2>&1; then
        break
    fi
    sleep 2
    timeout=$((timeout - 2))
done

# Create backup directory
mkdir -p ./backups
BACKUP_FILE="./backups/sqlite_export_$(date +%Y%m%d_%H%M%S).json"

echo "📦 Exporting from SQLite..."
# FIX: the original redirected stderr into the JSON export (`2>&1`), which
# corrupted the fixture whenever dumpdata printed warnings. Stderr now goes
# to a separate file inside the container so the export stays valid JSON.
docker-compose exec -T backend bash -c "
# Temporarily use SQLite
export DATABASE_URL=sqlite:///db.sqlite3
python manage.py dumpdata --natural-foreign --natural-primary \
  --exclude auth.permission \
  --exclude contenttypes \
  --indent 2 > /tmp/sqlite_export.json 2>/tmp/sqlite_export.err
cat /tmp/sqlite_export.json
" > "$BACKUP_FILE"
echo "✅ Exported to $BACKUP_FILE"

# Run migrations on PostgreSQL so the schema exists before loading data.
echo "📦 Running migrations on PostgreSQL..."
docker-compose exec -T backend python manage.py migrate --noinput

# Import into PostgreSQL (loaddata reads the export left in the container).
echo "📥 Importing into PostgreSQL..."
docker-compose exec -T backend bash -c "
python manage.py loaddata /tmp/sqlite_export.json 2>&1 || echo 'Import completed with warnings'
"

echo "✅ Migration completed!"
echo ""
echo "📊 Verifying migration..."
# Spot-check: compare user counts as a cheap sanity signal.
echo " Checking user count..."
USERS=$(docker-compose exec -T backend python manage.py shell -c "from django.contrib.auth.models import User; print(User.objects.count())" 2>/dev/null | tail -1)
echo " Users in PostgreSQL: $USERS"

# Flag file consumed by docker-start.sh to skip future migration attempts.
touch .migrated_to_postgres

echo ""
echo "✅ Migration complete! Backend is now using PostgreSQL."

View File

@@ -1,133 +0,0 @@
#!/bin/bash
# Script to migrate data from SQLite to PostgreSQL.
# Full variant: validates preconditions, exports SQLite via dumpdata,
# ensures the PostgreSQL database exists, migrates the schema, imports the
# data, and prints a verification summary.
set -e

echo "🔄 Starting SQLite to PostgreSQL Migration..."

# Check if SQLite database exists
SQLITE_DB="./backEnd/db.sqlite3"
if [ ! -f "$SQLITE_DB" ]; then
    echo "❌ SQLite database not found at $SQLITE_DB"
    exit 1
fi
echo "✅ Found SQLite database"

# Check if PostgreSQL is running
if ! docker-compose ps postgres | grep -q "Up"; then
    echo "❌ PostgreSQL container is not running. Please start it first:"
    echo " docker-compose up -d postgres"
    exit 1
fi
echo "✅ PostgreSQL container is running"

# Load environment variables.
# FIX: `export $(cat ... | xargs)` broke on values with spaces/quotes;
# `set -a` + sourcing exports every assignment safely.
if [ -f .env.production ]; then
    set -a
    # shellcheck disable=SC1091
    . ./.env.production
    set +a
fi

# Check if DATABASE_URL is set for PostgreSQL
if [ -z "$DATABASE_URL" ] || ! echo "$DATABASE_URL" | grep -q "postgresql://"; then
    echo "❌ DATABASE_URL is not set to PostgreSQL"
    echo " Please update .env.production with PostgreSQL DATABASE_URL"
    exit 1
fi
echo "✅ PostgreSQL DATABASE_URL is configured"

# Create backup directory
BACKUP_DIR="./backups"
mkdir -p "$BACKUP_DIR"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="$BACKUP_DIR/sqlite_backup_$TIMESTAMP.json"

echo "📦 Exporting data from SQLite..."
echo " Backup will be saved to: $BACKUP_FILE"
# Export data from SQLite using Django's dumpdata.
# FIX: the original merged stderr into the JSON export (`2>&1`), corrupting
# the fixture whenever dumpdata printed warnings. Stderr now goes to a
# separate file inside the container so the backup stays valid JSON.
docker-compose exec -T backend bash -c "
export DATABASE_URL=sqlite:///db.sqlite3
python manage.py dumpdata --natural-foreign --natural-primary --exclude auth.permission --exclude contenttypes > /tmp/sqlite_export.json 2>/tmp/dumpdata.err || true
cat /tmp/sqlite_export.json
" > "$BACKUP_FILE"

# Check if export was successful (empty file or error text means trouble).
if [ ! -s "$BACKUP_FILE" ] || grep -q "Error\|Traceback\|Exception" "$BACKUP_FILE"; then
    echo "⚠️ Warning: Export may have issues, but continuing..."
fi
echo "✅ Data exported to $BACKUP_FILE"
echo " File size: $(du -h "$BACKUP_FILE" | cut -f1)"

# Wait for PostgreSQL to accept connections (up to 30 seconds).
echo "⏳ Waiting for PostgreSQL to be ready..."
timeout=30
while [ $timeout -gt 0 ]; do
    if docker-compose exec -T postgres pg_isready -U "${POSTGRES_USER:-gnx}" > /dev/null 2>&1; then
        echo "✅ PostgreSQL is ready"
        break
    fi
    echo " Waiting for PostgreSQL... ($timeout seconds remaining)"
    sleep 2
    timeout=$((timeout - 2))
done
if [ $timeout -le 0 ]; then
    echo "❌ PostgreSQL is not ready. Please check the logs:"
    echo " docker-compose logs postgres"
    exit 1
fi

# Create database if it doesn't exist
echo "📊 Ensuring PostgreSQL database exists..."
docker-compose exec -T postgres psql -U "${POSTGRES_USER:-gnx}" -d postgres -c "SELECT 1 FROM pg_database WHERE datname='${POSTGRES_DB:-gnxdb}'" | grep -q 1 || \
docker-compose exec -T postgres psql -U "${POSTGRES_USER:-gnx}" -d postgres -c "CREATE DATABASE ${POSTGRES_DB:-gnxdb};"
echo "✅ Database exists or created"

# Run migrations so the PostgreSQL schema exists before loading data.
echo "📦 Running migrations on PostgreSQL..."
docker-compose exec -T backend python manage.py migrate --noinput
echo "✅ Migrations completed"

# Import data into PostgreSQL.
# FIX: removed the redundant `< "$BACKUP_FILE"` stdin redirection — loaddata
# reads the file path inside the container, not stdin, so the redirection
# did nothing.
echo "📥 Importing data into PostgreSQL..."
if docker-compose exec -T backend bash -c "python manage.py loaddata /tmp/sqlite_export.json" 2>&1 | tee /tmp/import_log.txt; then
    echo "✅ Data imported successfully"
else
    echo "⚠️ Warning: Some data may not have imported. Check the log above."
    echo " You can retry the import manually:"
    echo " docker-compose exec backend python manage.py loaddata /tmp/sqlite_export.json"
fi

# Verify data transfer by comparing a cheap proxy metric (user counts).
echo "🔍 Verifying data transfer..."
SQLITE_COUNT=$(docker-compose exec -T backend bash -c "export DATABASE_URL=sqlite:///db.sqlite3 && python manage.py shell -c \"from django.contrib.auth.models import User; print(User.objects.count())\"" 2>/dev/null | tail -1 || echo "0")
POSTGRES_COUNT=$(docker-compose exec -T backend python manage.py shell -c "from django.contrib.auth.models import User; print(User.objects.count())" 2>/dev/null | tail -1 || echo "0")

echo ""
echo "📊 Migration Summary:"
echo " SQLite Users: $SQLITE_COUNT"
echo " PostgreSQL Users: $POSTGRES_COUNT"
echo ""

# Flag file consumed by docker-start.sh to skip future migration attempts.
touch .migrated_to_postgres

echo "✅ Migration completed!"
echo ""
echo "📋 Next steps:"
echo " 1. Verify the data in PostgreSQL:"
echo " docker-compose exec backend python manage.py shell"
echo ""
echo " 2. Test the application with PostgreSQL"
echo ""
echo " 3. Once verified, you can backup and remove SQLite:"
echo " mv backEnd/db.sqlite3 backEnd/db.sqlite3.backup"
echo ""
echo " Backup file saved at: $BACKUP_FILE"

View File

@@ -1,23 +1,31 @@
# Production Nginx Configuration for GNX Soft # Production Nginx Configuration for GNX Soft
# Place this in /etc/nginx/sites-available/gnxsoft # Place this in /etc/nginx/sites-available/gnxsoft
# Symlink to /etc/nginx/sites-enabled/gnxsoft
# #
# DEPLOYMENT NOTES: # DEPLOYMENT NOTES (Host Deployment):
# 1. Frontend: Next.js production build runs on port 3000 # 1. Frontend: Next.js production build runs on port 1087
# - Build: npm run build # - Build: cd frontEnd && npm run build
# - Start: npm start (or use PM2: pm2 start npm --name "gnxsoft-frontend" -- start) # - Start: Use start-services.sh script or PM2: PORT=1087 pm2 start npm --name "gnxsoft-frontend" -- start
# 2. Backend: Django runs on port 8000 (internal only) # 2. Backend: Django runs on port 1086 (internal only)
# - Use Gunicorn: gunicorn gnx.wsgi:application --bind 127.0.0.1:8000 # - Use start-services.sh script or PM2: gunicorn gnx.wsgi:application --bind 127.0.0.1:1086 --workers 3
# - Or PM2: pm2 start gunicorn --name "gnxsoft-backend" -- gnx.wsgi:application --bind 127.0.0.1:8000 # 3. Database: PostgreSQL on host (port 5433 to avoid conflict with Docker instance on 5432)
# 4. Use install-postgresql.sh to install and configure PostgreSQL
# 5. Use start-services.sh to start both backend and frontend services
#
# NOTE: Rate limiting zones must be defined in the main nginx.conf http context
# Add these lines to /etc/nginx/nginx.conf inside the http {} block:
# limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
# limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s;
# Frontend - Public facing (Next.js Production Server) # Frontend - Public facing (Next.js Production Server on port 1087)
upstream frontend { upstream frontend {
server 127.0.0.1:3000; server 127.0.0.1:1087;
keepalive 64; keepalive 64;
} }
# Backend - Internal only (Django) # Backend - Internal only (Django on port 1086)
upstream backend_internal { upstream backend_internal {
server 127.0.0.1:8000; server 127.0.0.1:1086;
keepalive 64; keepalive 64;
} }
@@ -61,9 +69,7 @@ server {
add_header Referrer-Policy "strict-origin-when-cross-origin" always; add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Permissions-Policy "camera=(), microphone=(), geolocation=(), interest-cohort=()" always; add_header Permissions-Policy "camera=(), microphone=(), geolocation=(), interest-cohort=()" always;
# Rate Limiting Zones # Rate Limiting (zones must be defined in main nginx.conf http context)
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s;
limit_req_status 429; limit_req_status 429;
# Client settings # Client settings
@@ -75,7 +81,134 @@ server {
access_log /var/log/nginx/gnxsoft_access.log; access_log /var/log/nginx/gnxsoft_access.log;
error_log /var/log/nginx/gnxsoft_error.log warn; error_log /var/log/nginx/gnxsoft_error.log warn;
# Root location - Frontend (Next.js) # IMPORTANT: More specific location blocks MUST come before location /
# Order matters in nginx - longest match wins
# API Proxy - Frontend talks to backend ONLY through this internal proxy
# Backend port 1086 is BLOCKED from internet by firewall
location /api/ {
limit_req zone=api_limit burst=20 nodelay;
# Internal proxy to backend (127.0.0.1:1086)
# Backend is NOT accessible from public internet
proxy_pass http://backend_internal/api/;
proxy_http_version 1.1;
    # Add internal API key (must match INTERNAL_API_KEY in Django .env)
    # SECURITY NOTE(review): this literal key is committed to the repo — rotate it
    # and have deployment tooling (e.g. update-keys.sh) substitute the real value.
set $api_key "9hZtPwyScigoBAl59Uvcz_9VztSRC6Zt_6L1B2xTM2M";
proxy_set_header X-Internal-API-Key $api_key;
# Backend sees request as coming from localhost
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
# Hide backend server info
proxy_hide_header X-Powered-By;
proxy_hide_header Server;
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# CORS headers (if needed)
add_header Access-Control-Allow-Origin "https://gnxsoft.com" always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always;
add_header Access-Control-Allow-Headers "Authorization, Content-Type, X-Internal-API-Key" always;
add_header Access-Control-Allow-Credentials "true" always;
# Handle preflight requests
if ($request_method = 'OPTIONS') {
return 204;
}
}
# Media files (served by nginx directly for better performance)
location /media/ {
alias /var/www/GNX-WEB/backEnd/media/;
expires 30d;
add_header Cache-Control "public, immutable";
access_log off;
# Security
location ~ \.(php|py|pl|sh)$ {
deny all;
}
}
# Static files (served by nginx directly)
location /static/ {
alias /var/www/GNX-WEB/backEnd/staticfiles/;
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
}
# Next.js image optimization API - must be proxied to Next.js server
# Use regex to match /_next/image with query strings
location ~ ^/_next/image {
proxy_pass http://frontend;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
# Preserve query string
proxy_pass_request_headers on;
# Timeouts for image processing
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# Buffer settings for image processing
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
# Cache optimized images
proxy_cache_valid 200 1d;
add_header Cache-Control "public, max-age=86400";
}
# Next.js static files - serve directly from filesystem for better performance
location /_next/static/ {
alias /var/www/GNX-WEB/frontEnd/.next/static/;
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
# Correct MIME types
types {
text/css css;
application/javascript js;
application/json json;
font/woff2 woff2;
font/woff woff;
font/ttf ttf;
image/png png;
image/jpeg jpg jpeg;
image/webp webp;
image/svg+xml svg;
}
}
# Frontend public images
location /images/ {
alias /var/www/GNX-WEB/frontEnd/public/images/;
expires 30d;
add_header Cache-Control "public, immutable";
access_log off;
}
# Root location - Frontend (Next.js) - MUST be last
location / { location / {
limit_req zone=general_limit burst=50 nodelay; limit_req zone=general_limit burst=50 nodelay;
@@ -95,72 +228,6 @@ server {
proxy_read_timeout 60s; proxy_read_timeout 60s;
} }
# API Proxy - Frontend talks to backend ONLY through this internal proxy
# Backend port 8000 is BLOCKED from internet by firewall
location /api/ {
limit_req zone=api_limit burst=20 nodelay;
# Internal proxy to backend (127.0.0.1:8000)
# Backend is NOT accessible from public internet
proxy_pass http://backend_internal/api/;
proxy_http_version 1.1;
# Backend sees request as coming from localhost
proxy_set_header Host $host;
proxy_set_header X-Real-IP 127.0.0.1;
proxy_set_header X-Forwarded-For 127.0.0.1;
proxy_set_header X-Forwarded-Proto $scheme;
# Hide backend server info
proxy_hide_header X-Powered-By;
proxy_hide_header Server;
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# CORS headers (if needed)
add_header Access-Control-Allow-Origin "https://gnxsoft.com" always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always;
add_header Access-Control-Allow-Headers "Authorization, Content-Type" always;
add_header Access-Control-Allow-Credentials "true" always;
# Handle preflight requests
if ($request_method = 'OPTIONS') {
return 204;
}
}
# Media files (served by nginx directly for better performance)
location /media/ {
alias /var/www/gnxsoft/media/;
expires 30d;
add_header Cache-Control "public, immutable";
access_log off;
# Security
location ~ \.(php|py|pl|sh)$ {
deny all;
}
}
# Static files (served by nginx directly)
location /static/ {
alias /var/www/gnxsoft/static/;
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
}
# Next.js static files
location /_next/static/ {
proxy_pass http://frontend;
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
}
# Deny access to hidden files # Deny access to hidden files
location ~ /\. { location ~ /\. {
deny all; deny all;
@@ -168,10 +235,26 @@ server {
log_not_found off; log_not_found off;
} }
# Deny access to backend admin (extra security) # Admin panel - Proxy to backend (with IP restriction)
location /admin { location /admin/ {
deny all; # IP restriction is handled by Django middleware
return 404; # Add internal API key (must match INTERNAL_API_KEY in Django .env)
set $api_key "9hZtPwyScigoBAl59Uvcz_9VztSRC6Zt_6L1B2xTM2M";
proxy_set_header X-Internal-API-Key $api_key;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://backend_internal;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Connection "";
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
} }
# Health check endpoint # Health check endpoint
@@ -185,11 +268,13 @@ server {
# ============================================================================== # ==============================================================================
# IMPORTANT SECURITY NOTES: # IMPORTANT SECURITY NOTES:
# ============================================================================== # ==============================================================================
# 1. Backend runs on 127.0.0.1:8000 (internal only) # 1. Backend runs on 127.0.0.1:1086 (internal only)
# 2. Firewall BLOCKS external access to port 8000 # 2. Frontend runs on 127.0.0.1:1087 (internal only)
# 3. Only nginx can reach backend (internal network) # 3. Firewall BLOCKS external access to ports 1086 and 1087
# 4. Public internet can ONLY access nginx (ports 80, 443) # 4. Only nginx can reach backend/frontend (internal network)
# 5. All API calls go through nginx proxy (/api/* → 127.0.0.1:8000/api/*) # 5. Public internet can ONLY access nginx (ports 80, 443)
# 6. Backend IP whitelist middleware ensures only localhost requests # 6. All API calls go through nginx proxy (/api/* → 127.0.0.1:1086/api/*)
# 7. Backend IP whitelist middleware ensures only localhost requests
# 8. Rate limiting zones must be added to /etc/nginx/nginx.conf http {} block
# 9. PostgreSQL runs on port 5433 (to avoid conflict with Docker on 5432)
# ============================================================================== # ==============================================================================

View File

@@ -0,0 +1,7 @@
# Rate Limiting Zones for GNX-WEB
# Add these lines to /etc/nginx/nginx.conf inside the http {} block
# Or include this file in /etc/nginx/nginx.conf with: include /etc/nginx/conf.d/rate-limit-zones.conf;
#
# limit_req_zone is only valid in the http{} context, which is why these
# zones live here rather than in the site's server{} block. Both zones are
# keyed on the binary client address; a 10m shared-memory zone holds state
# for roughly 160k distinct addresses. The zones are referenced by the
# `limit_req zone=...` directives in the gnxsoft site configuration.
# API endpoints: 10 requests/second per client IP.
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
# All other traffic: 100 requests/second per client IP.
limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s;

View File

@@ -1,218 +0,0 @@
# Production Nginx Configuration for GNX Soft (Docker)
# This configuration is for nginx running on the host machine
# It proxies to Docker containers: backend (1086) and frontend (1087)
#
# IMPORTANT PORT CONFIGURATION:
# - Backend (Django): Only accessible on port 1086 (internal)
# - Frontend (Next.js): Only accessible on port 1087 (internal)
# - Nginx: Public access on ports 80 (HTTP) and 443 (HTTPS)
# - Ports 1086 and 1087 should be blocked from external access by firewall
# Frontend - Next.js running in Docker on port 1087
# All frontend requests (/) are proxied here
upstream frontend {
server 127.0.0.1:1087;
keepalive 64;
}
# Backend - Django running in Docker on port 1086
# All API requests (/api/) and admin (/admin/) are proxied here
upstream backend_internal {
server 127.0.0.1:1086;
keepalive 64;
}
# Redirect HTTP to HTTPS
server {
listen 80;
listen [::]:80;
server_name gnxsoft.com www.gnxsoft.com;
# Let's Encrypt validation
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
# Redirect all other traffic to HTTPS
location / {
return 301 https://$server_name$request_uri;
}
}
# HTTPS Server
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name gnxsoft.com www.gnxsoft.com;
# SSL Configuration
ssl_certificate /etc/letsencrypt/live/gnxsoft.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gnxsoft.com/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# Security Headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Permissions-Policy "camera=(), microphone=(), geolocation=(), interest-cohort=()" always;
# Rate Limiting Zones
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=general_limit:10m rate=100r/s;
limit_req_status 429;
# Client settings
client_max_body_size 10M;
client_body_timeout 30s;
client_header_timeout 30s;
# Logging
access_log /var/log/nginx/gnxsoft_access.log;
error_log /var/log/nginx/gnxsoft_error.log warn;
# Root location - Frontend (Next.js on port 1087)
location / {
limit_req zone=general_limit burst=50 nodelay;
proxy_pass http://frontend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
# API Proxy - Frontend talks to backend through this proxy
# Backend runs in Docker on port 1086 (internal only)
location /api/ {
limit_req zone=api_limit burst=20 nodelay;
# Internal proxy to backend Docker container (127.0.0.1:1086)
proxy_pass http://backend_internal/api/;
proxy_http_version 1.1;
# Backend sees request as coming from nginx
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
# Add internal API key (will be replaced by docker-start.sh)
set $api_key "PLACEHOLDER_INTERNAL_API_KEY";
proxy_set_header X-Internal-API-Key $api_key;
# Hide backend server info
proxy_hide_header X-Powered-By;
proxy_hide_header Server;
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# CORS headers (if needed)
add_header Access-Control-Allow-Origin "https://gnxsoft.com" always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always;
add_header Access-Control-Allow-Headers "Authorization, Content-Type, X-Internal-API-Key" always;
add_header Access-Control-Allow-Credentials "true" always;
# Handle preflight requests
if ($request_method = 'OPTIONS') {
return 204;
}
}
# Media files - Served from Docker volume
location /media/ {
alias /home/gnx/Desktop/GNX-WEB/backEnd/media/;
expires 30d;
add_header Cache-Control "public, immutable";
access_log off;
# Security - deny execution of scripts
location ~ \.(php|py|pl|sh)$ {
deny all;
}
}
# Static files - Served from Docker volume
location /static/ {
alias /home/gnx/Desktop/GNX-WEB/backEnd/staticfiles/;
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
}
# Next.js static files
location /_next/static/ {
proxy_pass http://frontend;
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
}
# Admin panel - Proxy to backend (with IP restriction)
location /admin/ {
# IP restriction is handled by Django middleware
# Add internal API key (will be replaced by docker-start.sh)
set $api_key "PLACEHOLDER_INTERNAL_API_KEY";
proxy_set_header X-Internal-API-Key $api_key;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://backend_internal;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Connection "";
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
}
# Deny access to hidden files
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
# Health check endpoint
location /health {
access_log off;
return 200 "OK\n";
add_header Content-Type text/plain;
}
}
# ==============================================================================
# IMPORTANT NOTES:
# ==============================================================================
# 1. Backend runs in Docker on port 1086 (internal only)
# 2. Frontend runs in Docker on port 1087
# 3. Nginx runs on host and proxies to Docker containers
# 4. Firewall should BLOCK external access to ports 1086 and 1087
# 5. Only nginx (ports 80, 443) should be accessible from internet
# 6. Set INTERNAL_API_KEY environment variable in nginx config or systemd service
# 7. Update media/static paths to match your actual deployment location
# ==============================================================================

16
restart-services.sh Executable file
View File

@@ -0,0 +1,16 @@
#!/bin/bash
# GNX-WEB Service Restart Script
#
# Restarts the PM2-managed backend and frontend processes. A service that
# is not registered with PM2 is reported but does not abort the script.

# ANSI color codes for terminal output
GREEN='\033[0;32m'
NC='\033[0m' # No Color

echo -e "${GREEN}Restarting GNX-WEB services...${NC}"

# Restart one PM2 process; report when it is not registered.
#   $1 = PM2 process name, $2 = human-readable label
restart_service() {
    pm2 restart "$1" 2>/dev/null || echo "$2 service not found"
}

restart_service gnxsoft-backend "Backend"
restart_service gnxsoft-frontend "Frontend"

echo -e "${GREEN}Services restarted${NC}"

View File

@@ -1,84 +0,0 @@
#!/bin/bash
# Initial setup script - Run this once after extracting the zip file
set -e
echo "🔧 GNX Web Application - Initial Setup"
echo "======================================"
echo ""
# Set all necessary permissions
echo "📋 Setting up file permissions..."
# Make all scripts executable
find . -name "*.sh" -type f -exec chmod +x {} \; 2>/dev/null || true
# Set directory permissions
mkdir -p backEnd/media backEnd/staticfiles backEnd/logs backups
chmod 755 backEnd/media backEnd/staticfiles backEnd/logs backups 2>/dev/null || true
# Set file permissions
if [ -f "backEnd/db.sqlite3" ]; then
chmod 644 backEnd/db.sqlite3 2>/dev/null || true
fi
if [ -f ".env.production" ]; then
chmod 600 .env.production 2>/dev/null || true
fi
# Ensure docker-start.sh is executable
chmod +x docker-start.sh 2>/dev/null || true
echo "✅ Permissions configured"
echo ""
# Check for required files
echo "📋 Checking required files..."
REQUIRED_FILES=(
"docker-compose.yml"
"nginx.conf"
".env.production"
"backEnd/Dockerfile"
"frontEnd/Dockerfile"
)
MISSING_FILES=()
for file in "${REQUIRED_FILES[@]}"; do
if [ ! -f "$file" ]; then
MISSING_FILES+=("$file")
fi
done
if [ ${#MISSING_FILES[@]} -gt 0 ]; then
echo "❌ Missing required files:"
for file in "${MISSING_FILES[@]}"; do
echo " - $file"
done
exit 1
fi
echo "✅ All required files present"
echo ""
# Check Docker
if ! command -v docker &> /dev/null; then
echo "❌ Docker is not installed. Please install Docker first."
exit 1
fi
if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
echo "❌ Docker Compose is not installed. Please install Docker Compose first."
exit 1
fi
echo "✅ Docker is installed"
echo ""
echo "✅ Setup complete!"
echo ""
echo "📋 Next steps:"
echo " 1. Review and update .env.production with your settings"
echo " 2. Run: ./docker-start.sh"
echo ""

258
start-services.sh Executable file
View File

@@ -0,0 +1,258 @@
#!/bin/bash
# GNX-WEB Service Startup Script
# Starts backend on port 1086 and frontend on port 1087
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Get script directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BACKEND_DIR="$SCRIPT_DIR/backEnd"
FRONTEND_DIR="$SCRIPT_DIR/frontEnd"
# Ports
BACKEND_PORT=1086
FRONTEND_PORT=1087
echo -e "${BLUE}=========================================="
echo "GNX-WEB Service Startup"
echo "==========================================${NC}"
# Check if PM2 is installed
if ! command -v pm2 &> /dev/null; then
echo -e "${YELLOW}PM2 is not installed. Installing PM2...${NC}"
npm install -g pm2
fi
# Function to check if port is in use
check_port() {
local port=$1
if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1 || netstat -tlnp 2>/dev/null | grep -q ":$port " || ss -tlnp 2>/dev/null | grep -q ":$port "; then
return 0
else
return 1
fi
}
# Check if ports are available
if check_port $BACKEND_PORT; then
echo -e "${YELLOW}Port $BACKEND_PORT is already in use. Stopping existing service...${NC}"
pm2 delete gnxsoft-backend 2>/dev/null || true
sleep 2
fi
if check_port $FRONTEND_PORT; then
echo -e "${YELLOW}Port $FRONTEND_PORT is already in use. Stopping existing service...${NC}"
pm2 delete gnxsoft-frontend 2>/dev/null || true
sleep 2
fi
# Check if backend directory exists
if [ ! -d "$BACKEND_DIR" ]; then
echo -e "${RED}Error: Backend directory not found at $BACKEND_DIR${NC}"
exit 1
fi
# Check if frontend directory exists
if [ ! -d "$FRONTEND_DIR" ]; then
echo -e "${RED}Error: Frontend directory not found at $FRONTEND_DIR${NC}"
exit 1
fi
# Function to generate secure random key
generate_secret_key() {
python3 -c "import secrets; print(secrets.token_urlsafe($1))" 2>/dev/null || \
openssl rand -base64 $((($1 * 3) / 4)) | tr -d '\n' | head -c $1
}
# Check if backend .env exists
if [ ! -f "$BACKEND_DIR/.env" ]; then
echo -e "${YELLOW}Warning: Backend .env file not found. Creating from example...${NC}"
if [ -f "$BACKEND_DIR/production.env.example" ]; then
cp "$BACKEND_DIR/production.env.example" "$BACKEND_DIR/.env"
# Generate and update keys automatically
echo -e "${BLUE}Generating secure keys...${NC}"
SECRET_KEY=$(generate_secret_key 50)
INTERNAL_API_KEY=$(generate_secret_key 32)
# Update keys in .env file
sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$SECRET_KEY|" "$BACKEND_DIR/.env"
sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" "$BACKEND_DIR/.env"
sed -i "s|^STATIC_ROOT=.*|STATIC_ROOT=$BACKEND_DIR/staticfiles|" "$BACKEND_DIR/.env"
sed -i "s|^MEDIA_ROOT=.*|MEDIA_ROOT=$BACKEND_DIR/media|" "$BACKEND_DIR/.env"
echo -e "${GREEN}✓ Generated and updated SECRET_KEY and INTERNAL_API_KEY${NC}"
echo -e "${YELLOW}Please update other values in $BACKEND_DIR/.env${NC}"
else
echo -e "${RED}Error: production.env.example not found${NC}"
exit 1
fi
else
# Check if keys need to be generated
if grep -q "your-super-secret\|your-secure-api-key\|PLACEHOLDER" "$BACKEND_DIR/.env"; then
echo -e "${BLUE}Generating secure keys for existing .env file...${NC}"
SECRET_KEY=$(generate_secret_key 50)
INTERNAL_API_KEY=$(generate_secret_key 32)
# Update keys in .env file
sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$SECRET_KEY|" "$BACKEND_DIR/.env"
sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY|" "$BACKEND_DIR/.env"
echo -e "${GREEN}✓ Updated SECRET_KEY and INTERNAL_API_KEY${NC}"
# Update nginx config if it exists
if [ -f "/etc/nginx/sites-available/gnxsoft" ]; then
echo -e "${BLUE}Updating nginx configuration with INTERNAL_API_KEY...${NC}"
escaped_key=$(echo "$INTERNAL_API_KEY" | sed 's/[[\.*^$()+?{|]/\\&/g')
sudo sed -i "s|set \$api_key \".*\";|set \$api_key \"$escaped_key\";|g" /etc/nginx/sites-available/gnxsoft
echo -e "${GREEN}✓ Updated nginx config with INTERNAL_API_KEY${NC}"
fi
fi
fi
# Start Backend
echo -e "${GREEN}[1/2] Starting Backend on port $BACKEND_PORT...${NC}"
cd "$BACKEND_DIR"
# Check if virtual environment exists
if [ ! -d "venv" ]; then
echo -e "${YELLOW}Virtual environment not found. Creating...${NC}"
python3 -m venv venv
fi
# Activate virtual environment
source venv/bin/activate
# Install/update dependencies
if [ ! -f "venv/.deps_installed" ]; then
echo -e "${BLUE}Installing Python dependencies...${NC}"
pip install -r requirements.txt
touch venv/.deps_installed
fi
# Run migrations
echo -e "${BLUE}Running database migrations...${NC}"
python manage.py migrate --noinput
# Collect static files
echo -e "${BLUE}Collecting static files...${NC}"
python manage.py collectstatic --noinput
# Create logs directory
mkdir -p logs
# Start backend with PM2
pm2 start gunicorn \
--name "gnxsoft-backend" \
--interpreter "$BACKEND_DIR/venv/bin/python" \
-- \
gnx.wsgi:application \
--bind 127.0.0.1:$BACKEND_PORT \
--workers 3 \
--timeout 120 \
--access-logfile "$BACKEND_DIR/logs/gunicorn_access.log" \
--error-logfile "$BACKEND_DIR/logs/gunicorn_error.log"
# Start Frontend
echo -e "${GREEN}[2/2] Starting Frontend on port $FRONTEND_PORT...${NC}"
cd "$FRONTEND_DIR"
# Check if node_modules exists
if [ ! -d "node_modules" ]; then
echo -e "${YELLOW}Node modules not found. Installing...${NC}"
npm install
fi
# Check if .next exists (build directory)
if [ ! -d ".next" ]; then
echo -e "${YELLOW}Frontend not built. Building...${NC}"
# Use production environment for build
NODE_ENV=production PORT=$FRONTEND_PORT npm run build
fi
# Create .env.production if it doesn't exist
if [ ! -f ".env.production" ]; then
echo -e "${BLUE}Creating .env.production file...${NC}"
cat > .env.production << EOF
NEXT_PUBLIC_SITE_URL=https://gnxsoft.com
NEXT_PUBLIC_API_URL=
PORT=$FRONTEND_PORT
NODE_ENV=production
NEXT_TELEMETRY_DISABLED=1
EOF
echo -e "${GREEN}✓ Created .env.production${NC}"
else
# Update PORT if it exists but is different
if ! grep -q "^PORT=$FRONTEND_PORT" .env.production; then
echo -e "${BLUE}Updating PORT in .env.production...${NC}"
if grep -q "^PORT=" .env.production; then
sed -i "s|^PORT=.*|PORT=$FRONTEND_PORT|" .env.production
else
echo "PORT=$FRONTEND_PORT" >> .env.production
fi
echo -e "${GREEN}✓ Updated PORT in .env.production${NC}"
fi
# Ensure NODE_ENV is set to production
if ! grep -q "^NODE_ENV=production" .env.production; then
if grep -q "^NODE_ENV=" .env.production; then
sed -i "s|^NODE_ENV=.*|NODE_ENV=production|" .env.production
else
echo "NODE_ENV=production" >> .env.production
fi
fi
fi
# Check if Next.js is using standalone output mode
if grep -q '"output":\s*"standalone"' next.config.js 2>/dev/null || grep -q "output:.*'standalone'" next.config.js 2>/dev/null; then
echo -e "${BLUE}Detected standalone mode. Starting with standalone server...${NC}"
# Check if standalone server exists
if [ ! -f ".next/standalone/server.js" ]; then
echo -e "${YELLOW}Standalone server not found. Rebuilding...${NC}"
NODE_ENV=production PORT=$FRONTEND_PORT npm run build
fi
# Start standalone server with PM2
PORT=$FRONTEND_PORT NODE_ENV=production pm2 start node \
--name "gnxsoft-frontend" \
--cwd "$FRONTEND_DIR" \
-- \
".next/standalone/server.js"
else
# Standard Next.js start
PORT=$FRONTEND_PORT NODE_ENV=production pm2 start npm \
--name "gnxsoft-frontend" \
-- start
fi
# Save PM2 configuration
pm2 save
echo ""
echo -e "${GREEN}=========================================="
echo "Services Started Successfully!"
echo "==========================================${NC}"
echo ""
echo -e "${BLUE}Backend:${NC} http://127.0.0.1:$BACKEND_PORT"
echo -e "${BLUE}Frontend:${NC} http://127.0.0.1:$FRONTEND_PORT"
echo ""
echo "PM2 Commands:"
echo " pm2 status - Check service status"
echo " pm2 logs gnxsoft-backend - View backend logs"
echo " pm2 logs gnxsoft-frontend - View frontend logs"
echo " pm2 restart all - Restart all services"
echo " pm2 stop all - Stop all services"
echo " pm2 delete all - Remove all services"
echo ""
echo -e "${YELLOW}Note: Make sure to configure nginx to proxy to these ports${NC}"
echo ""

17
stop-services.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# GNX-WEB Service Stop Script
#
# Stops the PM2-managed backend and frontend processes. A service that is
# not currently running is reported but does not abort the script.

# ANSI color codes for terminal output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}Stopping GNX-WEB services...${NC}"

# Stop one PM2 process; report when it is not running.
#   $1 = PM2 process name, $2 = human-readable label
stop_service() {
    pm2 stop "$1" 2>/dev/null || echo -e "${YELLOW}$2 service not running${NC}"
}

stop_service gnxsoft-backend "Backend"
stop_service gnxsoft-frontend "Frontend"

echo -e "${GREEN}Services stopped${NC}"

34
switch-to-sqlite.sh Normal file
View File

@@ -0,0 +1,34 @@
#!/bin/bash
# Switch the Django backend from PostgreSQL to SQLite.
#
# Comments out DATABASE_URL in the backend .env (Django then falls back to
# its configured SQLite database), restarts the PM2 backend process, and
# runs a Django database check against the default connection.

BACKEND_DIR="/var/www/GNX-WEB/backEnd"
BACKEND_ENV="$BACKEND_DIR/.env"

echo "Switching to SQLite database..."

# Guard clause: nothing to do without a backend .env file.
if [ ! -f "$BACKEND_ENV" ]; then
    echo "Error: .env file not found at $BACKEND_ENV"
    exit 1
fi

# Disable PostgreSQL by commenting out DATABASE_URL, if it is present.
if grep -q "^DATABASE_URL=" "$BACKEND_ENV"; then
    echo "Commenting out DATABASE_URL to use SQLite..."
    sed -i 's|^DATABASE_URL=.*|# DATABASE_URL= # Using SQLite instead|' "$BACKEND_ENV"
    echo "✓ DATABASE_URL commented out"
fi

echo ""
echo "SQLite database will be at: $BACKEND_DIR/db.sqlite3"
echo ""
echo "Restarting backend to apply changes..."
pm2 restart gnxsoft-backend
echo "✓ Backend restarted"
echo ""

# Sanity-check the default database connection with the venv's Django.
echo "Checking database connection..."
cd "$BACKEND_DIR"
source venv/bin/activate
python manage.py check --database default

View File

@@ -0,0 +1,27 @@
# systemd unit for the GNX-WEB Django backend, served by Gunicorn on
# 127.0.0.1:1086 (internal only; nginx proxies public traffic to it).
# Install: copy to /etc/systemd/system/, then
#   systemctl daemon-reload && systemctl enable --now gnxsoft-backend
[Unit]
Description=GNX-WEB Django Backend (Gunicorn)
# Start after networking and the local PostgreSQL server are up;
# Requires= also stops this unit when postgresql.service stops.
After=network.target postgresql.service
Requires=postgresql.service

[Service]
# NOTE(review): Type=notify relies on gunicorn's sd_notify readiness
# signalling — confirm the installed gunicorn version supports it,
# otherwise use Type=simple.
Type=notify
User=gnx
Group=gnx
WorkingDirectory=/home/gnx/Desktop/GNX-WEB/backEnd
# Restrict PATH to the virtualenv so the venv's gunicorn/python are used.
Environment="PATH=/home/gnx/Desktop/GNX-WEB/backEnd/venv/bin"
# Loads SECRET_KEY, DATABASE_URL, etc. from the backend .env file.
EnvironmentFile=/home/gnx/Desktop/GNX-WEB/backEnd/.env
ExecStart=/home/gnx/Desktop/GNX-WEB/backEnd/venv/bin/gunicorn \
    --bind 127.0.0.1:1086 \
    --workers 3 \
    --timeout 120 \
    --access-logfile /home/gnx/Desktop/GNX-WEB/backEnd/logs/gunicorn_access.log \
    --error-logfile /home/gnx/Desktop/GNX-WEB/backEnd/logs/gunicorn_error.log \
    --log-level info \
    gnx.wsgi:application
# SIGHUP makes gunicorn gracefully reload its workers.
ExecReload=/bin/kill -s HUP $MAINPID
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,22 @@
# systemd unit for the GNX-WEB Next.js frontend on 127.0.0.1:1087
# (internal only; nginx proxies public traffic to it).
# Install: copy to /etc/systemd/system/, then
#   systemctl daemon-reload && systemctl enable --now gnxsoft-frontend
[Unit]
Description=GNX-WEB Next.js Frontend
After=network.target

[Service]
Type=simple
User=gnx
Group=gnx
WorkingDirectory=/home/gnx/Desktop/GNX-WEB/frontEnd
Environment="NODE_ENV=production"
# Must match the frontend upstream port in the nginx site config.
Environment="PORT=1087"
Environment="NEXT_TELEMETRY_DISABLED=1"
# Additional settings (NEXT_PUBLIC_* etc.) come from the build-time env file.
EnvironmentFile=/home/gnx/Desktop/GNX-WEB/frontEnd/.env.production
ExecStart=/usr/bin/npm start
Restart=always
RestartSec=10
# Send stdout/stderr to the journal for `journalctl -u gnxsoft-frontend`.
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=multi-user.target

80
update-keys.sh Executable file
View File

@@ -0,0 +1,80 @@
#!/bin/bash
# GNX-WEB Key Update Script
# Regenerates and updates SECRET_KEY and INTERNAL_API_KEY
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Get script directory (absolute, so the script works from any cwd)
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BACKEND_DIR="$SCRIPT_DIR/backEnd"

# Generate a secure random key of $1 characters.
# Prefers Python's secrets.token_urlsafe (alphabet A-Za-z0-9_-); falls back to
# openssl base64 (alphabet A-Za-z0-9+/=) when python3 is unavailable.
generate_secret_key() {
    python3 -c "import secrets; print(secrets.token_urlsafe($1))" 2>/dev/null || \
    openssl rand -base64 $((($1 * 3) / 4)) | tr -d '\n' | head -c $1
}

# Escape a string for use in the REPLACEMENT part of a sed "s|...|...|" command.
# In that position only backslash, '&' and the delimiter '|' are special.
# (The previous code escaped regex metacharacters instead, which both missed '&'
# and could corrupt keys on seds where "\x" in a replacement is undefined, and
# the .env substitutions used the raw, unescaped keys.)
escape_sed_replacement() {
    printf '%s' "$1" | sed 's/[\\&|]/\\&/g'
}

echo -e "${BLUE}=========================================="
echo "GNX-WEB Key Update Script"
echo "==========================================${NC}"
echo ""

# Check if .env file exists
if [ ! -f "$BACKEND_DIR/.env" ]; then
    echo -e "${RED}Error: .env file not found at $BACKEND_DIR/.env${NC}"
    echo -e "${YELLOW}Please run deploy.sh first or create .env manually${NC}"
    exit 1
fi

# Generate new keys
echo -e "${BLUE}Generating new secure keys...${NC}"
SECRET_KEY=$(generate_secret_key 50)
INTERNAL_API_KEY=$(generate_secret_key 32)

# Escape once; the escaped forms are safe in every sed replacement below.
SECRET_KEY_ESC=$(escape_sed_replacement "$SECRET_KEY")
INTERNAL_API_KEY_ESC=$(escape_sed_replacement "$INTERNAL_API_KEY")

# Update .env file
echo -e "${BLUE}Updating .env file...${NC}"
sed -i "s|^SECRET_KEY=.*|SECRET_KEY=$SECRET_KEY_ESC|" "$BACKEND_DIR/.env"
sed -i "s|^INTERNAL_API_KEY=.*|INTERNAL_API_KEY=$INTERNAL_API_KEY_ESC|" "$BACKEND_DIR/.env"
echo -e "${GREEN}✓ Updated SECRET_KEY${NC}"
echo -e "${GREEN}✓ Updated INTERNAL_API_KEY${NC}"

# Update nginx config if it exists
if [ -f "/etc/nginx/sites-available/gnxsoft" ]; then
    echo -e "${BLUE}Updating nginx configuration...${NC}"
    sudo sed -i "s|set \$api_key \".*\";|set \$api_key \"$INTERNAL_API_KEY_ESC\";|g" /etc/nginx/sites-available/gnxsoft
    echo -e "${GREEN}✓ Updated nginx config with INTERNAL_API_KEY${NC}"
    # Test nginx configuration
    if sudo nginx -t >/dev/null 2>&1; then
        echo -e "${GREEN}✓ Nginx configuration is valid${NC}"
        echo -e "${YELLOW}Reload nginx with: sudo systemctl reload nginx${NC}"
    else
        echo -e "${RED}✗ Nginx configuration has errors${NC}"
        echo -e "${YELLOW}Please check manually: sudo nginx -t${NC}"
    fi
else
    echo -e "${YELLOW}⚠ Nginx config not found. Update manually if needed.${NC}"
fi

echo ""
echo -e "${GREEN}=========================================="
echo "Keys Updated Successfully!"
echo "==========================================${NC}"
echo ""
echo -e "${BLUE}New Keys:${NC}"
echo -e "${GREEN}SECRET_KEY: ${SECRET_KEY:0:30}...${NC}"
echo -e "${GREEN}INTERNAL_API_KEY: ${INTERNAL_API_KEY:0:30}...${NC}"
echo ""
echo -e "${YELLOW}Note: You may need to restart services for changes to take effect${NC}"
echo -e "${YELLOW}Run: ./restart-services.sh${NC}"
echo ""

283
verify-deployment.sh Executable file
View File

@@ -0,0 +1,283 @@
#!/bin/bash
# GNX-WEB Deployment Verification Script
# Checks if all components are properly configured and running
# NOTE(review): no `set -e` here — apparently deliberate, so every check runs
# even after failures and results are tallied instead of aborting.
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}=========================================="
echo "GNX-WEB Deployment Verification"
echo "==========================================${NC}"
echo ""
# Running totals: ERRORS gates the final exit status; WARNINGS is informational.
ERRORS=0
WARNINGS=0
# Succeed when the named command resolves in the current PATH
# (binary, builtin, alias or function).
command_exists() {
    if command -v "$1" >/dev/null 2>&1; then
        return 0
    else
        return 1
    fi
}
# Probe whether something is listening on TCP port $1, trying lsof first,
# then netstat, then ss — whichever of those tools happens to be installed.
port_listening() {
    local port=$1
    # Each probe short-circuits: first success wins.
    if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1; then
        return 0
    fi
    if netstat -tlnp 2>/dev/null | grep -q ":$port "; then
        return 0
    fi
    # Function status is the status of this last probe (0 if listening, 1 otherwise).
    ss -tlnp 2>/dev/null | grep -q ":$port "
}
# Check required commands
echo -e "${BLUE}Checking required commands...${NC}"
# Hard requirements for the stack — each missing command counts as a fatal error.
for cmd in python3 node npm nginx psql; do
    if command_exists $cmd; then
        echo -e "${GREEN}${NC} $cmd is installed"
    else
        echo -e "${RED}${NC} $cmd is NOT installed"
        ((ERRORS++))
    fi
done
# Check PM2
# PM2 is optional (process manager), so its absence is only a warning.
if command_exists pm2; then
    echo -e "${GREEN}${NC} PM2 is installed"
else
    echo -e "${YELLOW}${NC} PM2 is not installed (recommended for process management)"
    ((WARNINGS++))
fi
echo ""
# Check backend
# NOTE(review): paths below are relative (backEnd/...) — run this script from
# the repository root.
echo -e "${BLUE}Checking Backend...${NC}"
if [ -f "backEnd/.env" ]; then
    echo -e "${GREEN}${NC} Backend .env file exists"
    # Check for critical variables
    # A value still containing the template placeholder text counts as unset.
    if grep -q "SECRET_KEY=" backEnd/.env && ! grep -q "your-super-secret" backEnd/.env; then
        echo -e "${GREEN}${NC} SECRET_KEY is set"
    else
        echo -e "${RED}${NC} SECRET_KEY not properly configured"
        ((ERRORS++))
    fi
    if grep -q "INTERNAL_API_KEY=" backEnd/.env && ! grep -q "PLACEHOLDER\|your-secure-api-key" backEnd/.env; then
        echo -e "${GREEN}${NC} INTERNAL_API_KEY is set"
    else
        echo -e "${RED}${NC} INTERNAL_API_KEY not properly configured"
        ((ERRORS++))
    fi
    # DATABASE_URL is only a warning — the backend can fall back to SQLite.
    if grep -q "DATABASE_URL=" backEnd/.env && ! grep -q "your_password_here" backEnd/.env; then
        echo -e "${GREEN}${NC} DATABASE_URL is configured"
    else
        echo -e "${YELLOW}${NC} DATABASE_URL may not be configured"
        ((WARNINGS++))
    fi
else
    echo -e "${RED}${NC} Backend .env file not found"
    ((ERRORS++))
fi
if [ -d "backEnd/venv" ]; then
    echo -e "${GREEN}${NC} Backend virtual environment exists"
else
    echo -e "${YELLOW}${NC} Backend virtual environment not found"
    ((WARNINGS++))
fi
# Port 1086 is the gunicorn bind address used elsewhere in the deployment.
if port_listening 1086; then
    echo -e "${GREEN}${NC} Backend is running on port 1086"
else
    echo -e "${YELLOW}${NC} Backend is not running on port 1086"
    ((WARNINGS++))
fi
echo ""
# Check frontend
# Verifies env file, installed dependencies, a production build, and that the
# Next.js server is actually listening on its port (1087).
echo -e "${BLUE}Checking Frontend...${NC}"
if [ -f "frontEnd/.env.production" ]; then
    echo -e "${GREEN}${NC} Frontend .env.production exists"
else
    echo -e "${YELLOW}${NC} Frontend .env.production not found"
    ((WARNINGS++))
fi
if [ -d "frontEnd/node_modules" ]; then
    echo -e "${GREEN}${NC} Frontend node_modules exists"
else
    echo -e "${YELLOW}${NC} Frontend node_modules not found (run npm install)"
    ((WARNINGS++))
fi
# .next is created by `npm run build`; its absence means no production build yet.
if [ -d "frontEnd/.next" ]; then
    echo -e "${GREEN}${NC} Frontend build exists"
else
    echo -e "${YELLOW}${NC} Frontend not built (run npm run build)"
    ((WARNINGS++))
fi
if port_listening 1087; then
    echo -e "${GREEN}${NC} Frontend is running on port 1087"
else
    echo -e "${YELLOW}${NC} Frontend is not running on port 1087"
    ((WARNINGS++))
fi
echo ""
# Check database
# This deployment runs PostgreSQL on the non-default port 5433.
echo -e "${BLUE}Checking Database...${NC}"
if port_listening 5433; then
    echo -e "${GREEN}${NC} PostgreSQL is running on port 5433"
else
    echo -e "${YELLOW}${NC} PostgreSQL is not running on port 5433"
    ((WARNINGS++))
fi
if command_exists psql; then
    # Everything after the first '=' (cut -f2-) so passwords containing '=' survive.
    DB_URL=$(grep "^DATABASE_URL=" backEnd/.env 2>/dev/null | cut -d'=' -f2-)
    if [ -n "$DB_URL" ] && [[ "$DB_URL" == postgresql://* ]]; then
        # Extract components from postgresql://user:password@host:port/database
        DB_USER=$(echo "$DB_URL" | sed -n 's|.*://\([^:]*\):.*|\1|p')
        DB_PASS=$(echo "$DB_URL" | sed -n 's|.*://[^:]*:\([^@]*\)@.*|\1|p')
        DB_HOST=$(echo "$DB_URL" | sed -n 's|.*@\([^:]*\):.*|\1|p')
        DB_PORT=$(echo "$DB_URL" | sed -n 's|.*:\([0-9]*\)/.*|\1|p')
        DB_NAME=$(echo "$DB_URL" | sed -n 's|.*/\([^?]*\).*|\1|p')
        if [ -n "$DB_USER" ] && [ -n "$DB_PASS" ] && [ -n "$DB_NAME" ]; then
            # Cheapest possible round-trip: authenticate and run SELECT 1.
            if PGPASSWORD="$DB_PASS" psql -h "${DB_HOST:-localhost}" -p "${DB_PORT:-5433}" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1;" >/dev/null 2>&1; then
                echo -e "${GREEN}${NC} Database connection successful"
            else
                echo -e "${YELLOW}${NC} Could not verify database connection (check credentials)"
                ((WARNINGS++))
            fi
        else
            echo -e "${YELLOW}${NC} Could not parse DATABASE_URL for connection test"
            ((WARNINGS++))
        fi
    else
        echo -e "${YELLOW}${NC} DATABASE_URL not found or invalid format"
        ((WARNINGS++))
    fi
fi
echo ""
# Check nginx
echo -e "${BLUE}Checking Nginx...${NC}"
if [ -f "/etc/nginx/sites-available/gnxsoft" ]; then
    echo -e "${GREEN}${NC} Nginx configuration exists"
    # The site is enabled only when sites-enabled holds a symlink (-L) to it.
    if [ -L "/etc/nginx/sites-enabled/gnxsoft" ]; then
        echo -e "${GREEN}${NC} Nginx site is enabled"
    else
        echo -e "${YELLOW}${NC} Nginx site is not enabled"
        ((WARNINGS++))
    fi
    # A leftover template placeholder means the internal API key was never
    # injected (see update-keys.sh) — treated as a fatal error.
    if grep -q "PLACEHOLDER_INTERNAL_API_KEY" /etc/nginx/sites-available/gnxsoft; then
        echo -e "${RED}${NC} Nginx config still has PLACEHOLDER_INTERNAL_API_KEY"
        ((ERRORS++))
    else
        echo -e "${GREEN}${NC} Nginx INTERNAL_API_KEY is configured"
    fi
else
    echo -e "${YELLOW}${NC} Nginx configuration not found"
    ((WARNINGS++))
fi
if systemctl is-active --quiet nginx 2>/dev/null; then
    echo -e "${GREEN}${NC} Nginx is running"
else
    echo -e "${YELLOW}${NC} Nginx is not running"
    ((WARNINGS++))
fi
if port_listening 80; then
    echo -e "${GREEN}${NC} HTTP port 80 is listening"
else
    echo -e "${YELLOW}${NC} HTTP port 80 is not listening"
    ((WARNINGS++))
fi
if port_listening 443; then
    echo -e "${GREEN}${NC} HTTPS port 443 is listening"
else
    echo -e "${YELLOW}${NC} HTTPS port 443 is not listening"
    ((WARNINGS++))
fi
echo ""
# Check firewall
# Only UFW is recognized; other firewall frontends fall through to a warning.
# NOTE(review): `ufw status` typically needs root — when run unprivileged this
# may report inactive/warn even if the firewall is up; confirm.
echo -e "${BLUE}Checking Firewall...${NC}"
if command_exists ufw; then
    if ufw status | grep -q "Status: active"; then
        echo -e "${GREEN}${NC} UFW firewall is active"
    else
        echo -e "${YELLOW}${NC} UFW firewall is not active"
        ((WARNINGS++))
    fi
else
    echo -e "${YELLOW}${NC} UFW not found (firewall may be managed differently)"
    ((WARNINGS++))
fi
echo ""
# Check PM2 services
# Skipped entirely when PM2 isn't installed (already warned about earlier).
if command_exists pm2; then
    echo -e "${BLUE}Checking PM2 Services...${NC}"
    # Two-step check: first that the process entry exists at all, then that its
    # status column reads "online" (same `pm2 list` output, grepped twice).
    if pm2 list | grep -q "gnxsoft-backend"; then
        if pm2 list | grep -q "gnxsoft-backend.*online"; then
            echo -e "${GREEN}${NC} Backend service is running in PM2"
        else
            echo -e "${YELLOW}${NC} Backend service exists but may not be online"
            ((WARNINGS++))
        fi
    else
        echo -e "${YELLOW}${NC} Backend service not found in PM2"
        ((WARNINGS++))
    fi
    if pm2 list | grep -q "gnxsoft-frontend"; then
        if pm2 list | grep -q "gnxsoft-frontend.*online"; then
            echo -e "${GREEN}${NC} Frontend service is running in PM2"
        else
            echo -e "${YELLOW}${NC} Frontend service exists but may not be online"
            ((WARNINGS++))
        fi
    else
        echo -e "${YELLOW}${NC} Frontend service not found in PM2"
        ((WARNINGS++))
    fi
fi
echo ""
echo -e "${BLUE}=========================================="
echo "Verification Summary"
echo "==========================================${NC}"
# Exit status: 0 when no fatal errors were recorded (warnings alone don't fail
# the verification), 1 otherwise.
if [ $ERRORS -eq 0 ] && [ $WARNINGS -eq 0 ]; then
    echo -e "${GREEN}✓ All checks passed!${NC}"
    exit 0
elif [ $ERRORS -eq 0 ]; then
    echo -e "${YELLOW}$WARNINGS warning(s) found${NC}"
    echo -e "${GREEN}✓ No critical errors${NC}"
    exit 0
else
    echo -e "${RED}$ERRORS error(s) found${NC}"
    if [ $WARNINGS -gt 0 ]; then
        echo -e "${YELLOW}$WARNINGS warning(s) found${NC}"
    fi
    exit 1
fi