template
This commit is contained in:
43
backend/.env.example
Normal file
43
backend/.env.example
Normal file
@@ -0,0 +1,43 @@
|
||||
# =============================================================================
|
||||
# ENVIRONMENT VARIABLES
|
||||
# =============================================================================
|
||||
# Copy this file to .env and fill in your values
|
||||
# NEVER commit .env to version control!
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# SERVER CONFIGURATION
|
||||
# -----------------------------------------------------------------------------
|
||||
PORT=8000
|
||||
ENVIRONMENT=development
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# SUPABASE CONFIGURATION
|
||||
# -----------------------------------------------------------------------------
|
||||
# Get these from: https://app.supabase.com/project/_/settings/api
|
||||
SUPABASE_URL=https://your-project.supabase.co
|
||||
SUPABASE_SERVICE_KEY=your-service-role-key-here
|
||||
# Note: Use SERVICE KEY (not anon key) for backend - has admin access
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# AI PROVIDER CONFIGURATION
|
||||
# -----------------------------------------------------------------------------
|
||||
# Choose your AI provider and add the appropriate key
|
||||
|
||||
# OpenAI (GPT-4, GPT-3.5)
|
||||
OPENAI_API_KEY=sk-your-openai-key-here
|
||||
|
||||
# Anthropic (Claude)
|
||||
# ANTHROPIC_API_KEY=sk-ant-your-anthropic-key-here
|
||||
|
||||
# Default AI provider to use: "openai" or "anthropic"
|
||||
AI_PROVIDER=openai
|
||||
|
||||
# Default model to use
|
||||
AI_MODEL=gpt-3.5-turbo
|
||||
# Other options: gpt-4, gpt-4-turbo, claude-3-opus-20240229, claude-3-sonnet-20240229
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# CORS CONFIGURATION
|
||||
# -----------------------------------------------------------------------------
|
||||
# Comma-separated list of allowed origins
|
||||
CORS_ORIGINS=http://localhost:3000,http://127.0.0.1:3000
|
||||
83
backend/config.py
Normal file
83
backend/config.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""
|
||||
=============================================================================
|
||||
CONFIGURATION MODULE
|
||||
=============================================================================
|
||||
|
||||
This module handles all configuration using Pydantic Settings.
|
||||
|
||||
Benefits of using Pydantic Settings:
|
||||
- Type validation for environment variables
|
||||
- Auto-loading from .env files
|
||||
- IDE autocomplete and type hints
|
||||
- Clear documentation of all required config
|
||||
|
||||
Usage:
|
||||
from config import settings
|
||||
print(settings.OPENAI_API_KEY)
|
||||
"""
|
||||
|
||||
from pydantic_settings import BaseSettings
|
||||
from functools import lru_cache
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """
    Application settings loaded from environment variables.

    All settings can be overridden via:
    1. Environment variables
    2. .env file in the backend directory
    """

    # -------------------------------------------------------------------------
    # Server Configuration
    # -------------------------------------------------------------------------
    PORT: int = 8000
    ENVIRONMENT: str = "development"  # "development", "staging", "production"

    # -------------------------------------------------------------------------
    # Supabase Configuration
    # -------------------------------------------------------------------------
    SUPABASE_URL: str = ""
    SUPABASE_SERVICE_KEY: str = ""  # Service key for admin access

    # -------------------------------------------------------------------------
    # AI Configuration
    # -------------------------------------------------------------------------
    OPENAI_API_KEY: str = ""
    ANTHROPIC_API_KEY: str = ""
    AI_PROVIDER: str = "openai"  # "openai" or "anthropic"
    AI_MODEL: str = "gpt-3.5-turbo"

    # -------------------------------------------------------------------------
    # CORS Configuration
    # -------------------------------------------------------------------------
    CORS_ORIGINS: str = "http://localhost:3000"  # Comma-separated list

    @property
    def cors_origins_list(self) -> list[str]:
        """
        Parse CORS_ORIGINS into a list of origins.

        Fix: skip empty entries so values like "a,b," or "a,,b" don't
        produce "" entries, which would end up in the CORS allow-list.
        """
        return [
            origin.strip()
            for origin in self.CORS_ORIGINS.split(",")
            if origin.strip()
        ]

    class Config:
        # NOTE(review): pydantic v2 prefers `model_config = SettingsConfigDict(...)`;
        # the inner Config class still works but is deprecated - migrate when
        # convenient.
        # Load from .env file
        env_file = ".env"
        env_file_encoding = "utf-8"
        # Make field names case-insensitive
        case_sensitive = False
|
||||
|
||||
|
||||
# Use lru_cache to create a singleton settings instance
|
||||
# lru_cache makes this a singleton factory: the .env file is parsed once,
# on the first call, and every later call returns the same cached object.
@lru_cache
def get_settings() -> Settings:
    """
    Return the process-wide cached Settings instance.

    Reading environment/.env happens only on the first call; subsequent
    calls are cheap cache hits.
    """
    instance = Settings()
    return instance


# Module-level convenience handle: `from config import settings`
settings = get_settings()
|
||||
130
backend/main.py
Normal file
130
backend/main.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
=============================================================================
|
||||
FASTAPI MAIN APPLICATION
|
||||
=============================================================================
|
||||
|
||||
This is the entry point for the FastAPI backend.
|
||||
|
||||
FastAPI Features:
|
||||
- Automatic API documentation (Swagger UI at /docs)
|
||||
- Request validation with Pydantic
|
||||
- Async support for high performance
|
||||
- Easy dependency injection
|
||||
|
||||
To run the server:
|
||||
uvicorn main:app --reload --port 8000
|
||||
|
||||
Or use the provided run script:
|
||||
python run.py
|
||||
"""
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
# Import configuration
|
||||
from config import settings
|
||||
|
||||
# Import routers (API endpoints grouped by feature)
|
||||
from routers import ai, projects, health
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# APPLICATION LIFECYCLE
|
||||
# =============================================================================
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Startup/shutdown hook for the FastAPI application.

    Code before `yield` runs once at startup; code after it runs at
    shutdown. This is the place for database pools, ML model loading,
    cache warm-up, and their teardown.
    """
    startup_banner = (
        "🚀 Starting Hack Nation Backend...",
        f"📍 Environment: {settings.ENVIRONMENT}",
        f"🤖 AI Provider: {settings.AI_PROVIDER} ({settings.AI_MODEL})",
    )
    for line in startup_banner:
        print(line)

    yield  # hand control to the running application

    print("👋 Shutting down...")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# CREATE APPLICATION
|
||||
# =============================================================================
|
||||
|
||||
# The FastAPI application object. `title`/`description`/`version` feed the
# auto-generated OpenAPI docs served at /docs; `lifespan` wires in the
# startup/shutdown hook defined above.
app = FastAPI(
    title="Hack Nation API",
    description="""
    Backend API for Hack Nation hackathon template.

    Features:
    - AI text generation (OpenAI / Anthropic)
    - Supabase database integration
    - User authentication
    """,
    version="1.0.0",
    lifespan=lifespan,
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# MIDDLEWARE
|
||||
# =============================================================================
|
||||
|
||||
# CORS - Allow frontend to call API
|
||||
# CORS - Allow frontend to call API.
# NOTE(review): allow_credentials=True combined with a wildcard origin is
# rejected by browsers; origins here come from settings.CORS_ORIGINS, which
# should list explicit frontend URLs.
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.cors_origins_list,  # Origins allowed to call API
    allow_credentials=True,  # Allow cookies/auth headers
    allow_methods=["*"],  # Allow all HTTP methods
    allow_headers=["*"],  # Allow all headers
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ROUTERS
|
||||
# =============================================================================
|
||||
|
||||
# Include routers with prefixes
|
||||
# Each router handles a specific feature area
|
||||
# Mount each feature router under its API prefix. Table-driven so adding a
# new feature area is a one-line change; mount order matches the original
# (health, ai, projects).
for feature_router, url_prefix, tag in (
    (health.router, "/api", "Health"),
    (ai.router, "/api/ai", "AI"),
    (projects.router, "/api/projects", "Projects"),
):
    app.include_router(feature_router, prefix=url_prefix, tags=[tag])
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ROOT ENDPOINT
|
||||
# =============================================================================
|
||||
|
||||
@app.get("/")
async def root():
    """Root endpoint - useful for health checks and API info."""
    api_info = {
        "name": "Hack Nation API",
        "version": "1.0.0",
        "docs": "/docs",
        "health": "/api/health",
    }
    return api_info
|
||||
22
backend/requirements.txt
Normal file
22
backend/requirements.txt
Normal file
@@ -0,0 +1,22 @@
|
||||
# =============================================================================
|
||||
# FASTAPI BACKEND - REQUIREMENTS
|
||||
# =============================================================================
|
||||
# Install with: pip install -r requirements.txt
|
||||
# Or: pip install fastapi uvicorn openai python-dotenv supabase pydantic
|
||||
|
||||
# Web Framework
|
||||
fastapi>=0.115.0
|
||||
uvicorn[standard]>=0.30.0
|
||||
|
||||
# AI Integration
|
||||
openai>=1.50.0 # OpenAI API client
|
||||
anthropic>=0.39.0 # Anthropic Claude API (alternative)
|
||||
|
||||
# Database (Supabase)
|
||||
supabase>=2.10.0 # Supabase Python client
|
||||
|
||||
# Utilities
|
||||
python-dotenv>=1.0.0 # Load .env files
|
||||
pydantic>=2.9.0 # Data validation
|
||||
pydantic-settings>=2.5.0 # Settings management
|
||||
httpx>=0.27.0 # Async HTTP client
|
||||
10
backend/routers/__init__.py
Normal file
10
backend/routers/__init__.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""
|
||||
Routers Package
|
||||
|
||||
This package contains all API route handlers organized by feature.
|
||||
Each router is responsible for a specific domain of the application.
|
||||
"""
|
||||
|
||||
from . import ai, projects, health
|
||||
|
||||
__all__ = ["ai", "projects", "health"]
|
||||
171
backend/routers/ai.py
Normal file
171
backend/routers/ai.py
Normal file
@@ -0,0 +1,171 @@
|
||||
"""
|
||||
=============================================================================
|
||||
AI ROUTER
|
||||
=============================================================================
|
||||
|
||||
This router handles all AI-related endpoints.
|
||||
|
||||
Features:
|
||||
- Text generation with OpenAI or Anthropic
|
||||
- Streaming responses for real-time output
|
||||
- Multiple AI models support
|
||||
|
||||
Architecture:
|
||||
- Endpoints are thin - they just validate input and return output
|
||||
- Business logic is in the services module
|
||||
- This separation makes testing easier
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi.responses import StreamingResponse
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional
|
||||
|
||||
# Import our AI service
|
||||
from services.ai_service import generate_text, stream_text, chat_completion
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# REQUEST/RESPONSE MODELS
|
||||
# =============================================================================
|
||||
|
||||
class GenerateRequest(BaseModel):
    """Request body for text generation."""
    # The input prompt; bounded to keep requests within model context limits.
    prompt: str = Field(..., min_length=1, max_length=10000)
    # Cap on response length, in tokens.
    max_tokens: Optional[int] = Field(default=500, ge=1, le=4000)
    # Sampling temperature: 0 = deterministic, 2 = maximum randomness.
    temperature: Optional[float] = Field(default=0.7, ge=0, le=2)
|
||||
|
||||
|
||||
class GenerateResponse(BaseModel):
    """Response from text generation."""
    # The generated text.
    text: str
    # The model that produced it (e.g. "gpt-3.5-turbo").
    model: str
    # Provider-reported token usage; None if unavailable.
    usage: Optional[dict] = None
|
||||
|
||||
|
||||
class ChatMessage(BaseModel):
    """A single message in a chat conversation."""
    # Restricted to the three standard chat roles.
    role: str = Field(..., pattern="^(user|assistant|system)$")
    # Message text; must be non-empty.
    content: str = Field(..., min_length=1)
|
||||
|
||||
|
||||
class ChatRequest(BaseModel):
    """Request body for chat completion."""
    # Conversation history; at least one message required.
    # Fix: `min_items` is the deprecated pydantic-v1 spelling; pydantic v2
    # (pinned in requirements.txt) uses `min_length` for collections.
    messages: list[ChatMessage] = Field(..., min_length=1)
    # Cap on response length, in tokens.
    max_tokens: Optional[int] = Field(default=500, ge=1, le=4000)
    # Sampling temperature: 0 = deterministic, 2 = maximum randomness.
    temperature: Optional[float] = Field(default=0.7, ge=0, le=2)
|
||||
|
||||
|
||||
class ChatResponse(BaseModel):
    """Response from chat completion."""
    # The assistant's reply (role is always "assistant").
    message: ChatMessage
    # The model that produced the reply.
    model: str
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ENDPOINTS
|
||||
# =============================================================================
|
||||
|
||||
@router.post("/generate", response_model=GenerateResponse)
async def generate_endpoint(request: GenerateRequest):
    """
    Generate text from a prompt.

    Simple completion endpoint: send a prompt, receive generated text.

    Example:
        POST /api/ai/generate
        {"prompt": "Write a haiku about coding"}
    """
    try:
        generation = await generate_text(
            prompt=request.prompt,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
        )
        return GenerateResponse(**generation)
    except Exception as exc:
        # Surface provider/service failures as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
@router.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    """
    Chat completion with message history.

    Send the conversation so far (a list of messages) and receive the
    assistant's next reply.

    Example:
        POST /api/ai/chat
        {
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello!"}
            ]
        }
    """
    try:
        # Pydantic models -> plain dicts for the provider layer.
        history = [message.model_dump() for message in request.messages]

        completion = await chat_completion(
            messages=history,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
        )
        return ChatResponse(
            message=ChatMessage(**completion["message"]),
            model=completion["model"],
        )
    except Exception as exc:
        # Surface provider/service failures as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
@router.post("/stream")
async def stream_endpoint(request: GenerateRequest):
    """
    Stream generated text in real-time.

    Uses Server-Sent Events (SSE) to stream the response
    token by token. Great for chat interfaces!

    The frontend can read this with:
        const response = await fetch("/api/ai/stream", {...});
        const reader = response.body.getReader();
    """
    # Fix: the previous try/except here was dead code. Calling an async
    # generator function (stream_text) never raises - errors only surface
    # while the response body is iterated, after this handler has already
    # returned. Mid-stream errors must be handled inside stream_text itself
    # (or by the client detecting a truncated stream).
    return StreamingResponse(
        stream_text(
            prompt=request.prompt,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
        ),
        media_type="text/event-stream",
    )
|
||||
|
||||
|
||||
@router.get("/models")
async def list_models():
    """
    List available AI models, grouped by provider.

    Useful for letting users choose which model to use.
    """
    def model_entry(model_id: str, name: str, description: str) -> dict:
        # Shape of one selectable model in the catalog.
        return {"id": model_id, "name": name, "description": description}

    return {
        "openai": [
            model_entry("gpt-4-turbo", "GPT-4 Turbo", "Most capable model"),
            model_entry("gpt-4", "GPT-4", "High quality responses"),
            model_entry("gpt-3.5-turbo", "GPT-3.5 Turbo", "Fast and efficient"),
        ],
        "anthropic": [
            model_entry("claude-3-opus-20240229", "Claude 3 Opus", "Most capable"),
            model_entry("claude-3-sonnet-20240229", "Claude 3 Sonnet", "Balanced"),
            model_entry("claude-3-haiku-20240307", "Claude 3 Haiku", "Fast"),
        ],
    }
|
||||
62
backend/routers/health.py
Normal file
62
backend/routers/health.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
=============================================================================
|
||||
HEALTH CHECK ROUTER
|
||||
=============================================================================
|
||||
|
||||
Simple health check endpoints for monitoring and deployment.
|
||||
|
||||
These are useful for:
|
||||
- Load balancer health checks
|
||||
- Kubernetes readiness probes
|
||||
- Monitoring systems
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter
|
||||
from config import settings
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/health")
async def health_check():
    """
    Basic health check endpoint.

    Returns 200 with the current environment if the server is running.
    """
    payload = {
        "status": "healthy",
        "environment": settings.ENVIRONMENT,
    }
    return payload
|
||||
|
||||
|
||||
@router.get("/health/detailed")
async def detailed_health_check():
    """
    Detailed health check with per-service configuration status.

    Useful for debugging connection issues.
    """
    # AI is considered configured as soon as either provider key is set.
    has_ai_key = bool(settings.OPENAI_API_KEY or settings.ANTHROPIC_API_KEY)
    # Supabase needs both the project URL and the service key.
    has_supabase = bool(settings.SUPABASE_URL and settings.SUPABASE_SERVICE_KEY)

    return {
        "status": "healthy",
        "environment": settings.ENVIRONMENT,
        "services": {
            "ai": {
                "configured": has_ai_key,
                "provider": settings.AI_PROVIDER,
                "model": settings.AI_MODEL,
            },
            "database": {
                "configured": has_supabase,
                "provider": "supabase",
            },
        },
    }
|
||||
191
backend/routers/projects.py
Normal file
191
backend/routers/projects.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""
|
||||
=============================================================================
|
||||
PROJECTS ROUTER
|
||||
=============================================================================
|
||||
|
||||
Example CRUD endpoints for managing projects.
|
||||
|
||||
This demonstrates:
|
||||
- Supabase database integration
|
||||
- CRUD operations (Create, Read, Update, Delete)
|
||||
- Request validation with Pydantic
|
||||
- Error handling patterns
|
||||
|
||||
You can adapt this pattern for any resource in your app.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Header
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional
|
||||
from datetime import datetime
|
||||
|
||||
# Import Supabase client
|
||||
from services.database import get_supabase_client
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# REQUEST/RESPONSE MODELS
|
||||
# =============================================================================
|
||||
|
||||
class ProjectCreate(BaseModel):
    """Data required to create a new project."""
    # Human-readable project name (1-100 chars).
    name: str = Field(..., min_length=1, max_length=100)
    # Optional free-text description; defaults to empty string.
    description: Optional[str] = Field(default="", max_length=1000)
|
||||
|
||||
|
||||
class ProjectUpdate(BaseModel):
    """Data that can be updated on a project (all fields optional)."""
    # New name, if provided (1-100 chars).
    name: Optional[str] = Field(None, min_length=1, max_length=100)
    # New description, if provided.
    description: Optional[str] = Field(None, max_length=1000)
|
||||
|
||||
|
||||
class Project(BaseModel):
    """Full project model (database representation)."""
    # Primary key as a string - presumably a Supabase UUID; verify against schema.
    id: str
    name: str
    description: str
    # Id of the owning user.
    user_id: str
    # Row creation timestamp.
    created_at: datetime
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# ENDPOINTS
|
||||
# =============================================================================
|
||||
|
||||
@router.get("")
async def list_projects(
    authorization: str = Header(None, description="Bearer token from Supabase")
):
    """
    List all projects for the authenticated user.

    Requires authentication - pass the Supabase access token
    in the Authorization header.
    """
    if not authorization:
        raise HTTPException(status_code=401, detail="Authorization required")

    try:
        supabase = get_supabase_client()

        # NOTE(review): the token is only checked for presence, not verified;
        # in production decode/verify the JWT and filter by the caller's user.
        rows = supabase.table("projects").select("*").execute()
        return rows.data
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
@router.post("", response_model=Project)
async def create_project(
    project: ProjectCreate,
    authorization: str = Header(None),
):
    """
    Create a new project.

    Example:
        POST /api/projects
        {"name": "My AI App", "description": "An awesome AI project"}
    """
    if not authorization:
        raise HTTPException(status_code=401, detail="Authorization required")

    try:
        supabase = get_supabase_client()

        # NOTE(review): placeholder owner - in production extract the real
        # user id from the verified JWT.
        user_id = "demo-user-id"

        new_row = {
            "name": project.name,
            "description": project.description or "",
            "user_id": user_id,
        }
        inserted = supabase.table("projects").insert(new_row).execute()
        return inserted.data[0]
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
@router.get("/{project_id}", response_model=Project)
async def get_project(project_id: str):
    """Get a specific project by ID; 404 if it does not exist."""
    try:
        supabase = get_supabase_client()

        row = (
            supabase.table("projects")
            .select("*")
            .eq("id", project_id)
            .single()
            .execute()
        )

        # NOTE(review): .single() typically raises when no row matches, so
        # this branch is more of a defensive fallback than the main 404 path.
        if not row.data:
            raise HTTPException(status_code=404, detail="Project not found")

        return row.data
    except HTTPException:
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
@router.patch("/{project_id}", response_model=Project)
async def update_project(
    project_id: str,
    project: ProjectUpdate,
    authorization: str = Header(None),
):
    """
    Update a project.

    Only send the fields you want to update; unset fields are left alone.
    """
    if not authorization:
        raise HTTPException(status_code=401, detail="Authorization required")

    try:
        supabase = get_supabase_client()

        # Only fields the client actually sent are written.
        changes = project.model_dump(exclude_unset=True)
        if not changes:
            raise HTTPException(status_code=400, detail="No fields to update")

        updated = (
            supabase.table("projects")
            .update(changes)
            .eq("id", project_id)
            .execute()
        )
        if not updated.data:
            raise HTTPException(status_code=404, detail="Project not found")

        return updated.data[0]
    except HTTPException:
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
||||
|
||||
|
||||
@router.delete("/{project_id}")
async def delete_project(
    project_id: str,
    authorization: str = Header(None),
):
    """
    Delete a project.

    Returns 404 if no project with the given id exists, matching the
    behaviour of the GET/PATCH endpoints.
    """
    if not authorization:
        raise HTTPException(status_code=401, detail="Authorization required")

    try:
        supabase = get_supabase_client()

        response = supabase.table("projects").delete().eq("id", project_id).execute()

        # Fix: the delete result was previously ignored, so deleting a
        # nonexistent project silently reported success. Supabase returns
        # the deleted rows; an empty list means nothing matched.
        if not response.data:
            raise HTTPException(status_code=404, detail="Project not found")

        return {"message": "Project deleted", "id": project_id}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
||||
23
backend/run.py
Normal file
23
backend/run.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""
|
||||
=============================================================================
|
||||
RUN SCRIPT
|
||||
=============================================================================
|
||||
|
||||
Simple script to run the FastAPI server.
|
||||
Useful for development and debugging.
|
||||
|
||||
Usage:
|
||||
python run.py
|
||||
"""
|
||||
|
||||
import uvicorn
|
||||
from config import settings
|
||||
|
||||
if __name__ == "__main__":
    # Launch uvicorn programmatically. "main:app" is passed as an import
    # string (not the app object) because reload mode needs to re-import
    # the module on each code change.
    uvicorn.run(
        "main:app",
        host="0.0.0.0",  # listen on all interfaces (needed inside containers)
        port=settings.PORT,
        reload=settings.ENVIRONMENT == "development",  # Auto-reload in dev
        log_level="info",
    )
|
||||
10
backend/services/__init__.py
Normal file
10
backend/services/__init__.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""
|
||||
Services Package
|
||||
|
||||
This package contains business logic and external service integrations.
|
||||
Keeping business logic separate from routes makes the code more testable.
|
||||
"""
|
||||
|
||||
from . import ai_service, database
|
||||
|
||||
__all__ = ["ai_service", "database"]
|
||||
250
backend/services/ai_service.py
Normal file
250
backend/services/ai_service.py
Normal file
@@ -0,0 +1,250 @@
|
||||
"""
|
||||
=============================================================================
|
||||
AI SERVICE
|
||||
=============================================================================
|
||||
|
||||
This module handles all AI provider integrations.
|
||||
|
||||
Supported Providers:
|
||||
- OpenAI (GPT-4, GPT-3.5)
|
||||
- Anthropic (Claude)
|
||||
|
||||
Architecture:
|
||||
- Provider-agnostic interface (same API regardless of provider)
|
||||
- Easy to add new providers
|
||||
- Streaming support for real-time responses
|
||||
|
||||
The actual API keys and configuration come from config.py
|
||||
"""
|
||||
|
||||
from typing import AsyncGenerator, Optional
|
||||
import openai
|
||||
from config import settings
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# INITIALIZE CLIENTS
|
||||
# =============================================================================
|
||||
|
||||
# Initialize OpenAI client
|
||||
openai_client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
|
||||
|
||||
# For Anthropic, you would:
|
||||
# import anthropic
|
||||
# anthropic_client = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEXT GENERATION
|
||||
# =============================================================================
|
||||
|
||||
async def generate_text(
    prompt: str,
    max_tokens: int = 500,
    temperature: float = 0.7,
    model: Optional[str] = None,
) -> dict:
    """
    Generate text from a prompt using the configured AI provider.

    Args:
        prompt: The input text to generate from
        max_tokens: Maximum tokens in the response
        temperature: Creativity level (0 = deterministic, 2 = very creative)
        model: Override the default model

    Returns:
        dict with keys: text, model, usage

    Raises:
        ValueError: if settings.AI_PROVIDER names an unknown provider.

    Example:
        result = await generate_text("Write a haiku about Python")
        print(result["text"])
    """
    chosen_model = model or settings.AI_MODEL
    provider = settings.AI_PROVIDER

    # Dispatch to the provider-specific implementation.
    if provider == "openai":
        return await _generate_openai(prompt, max_tokens, temperature, chosen_model)
    if provider == "anthropic":
        return await _generate_anthropic(prompt, max_tokens, temperature, chosen_model)
    raise ValueError(f"Unknown AI provider: {provider}")
|
||||
|
||||
|
||||
async def _generate_openai(
    prompt: str,
    max_tokens: int,
    temperature: float,
    model: str,
) -> dict:
    """Generate text via OpenAI's chat completions API (single user turn)."""
    completion = await openai_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        temperature=temperature,
    )

    token_usage = {
        "prompt_tokens": completion.usage.prompt_tokens,
        "completion_tokens": completion.usage.completion_tokens,
    }
    return {
        "text": completion.choices[0].message.content,
        "model": model,
        "usage": token_usage,
    }
|
||||
|
||||
|
||||
async def _generate_anthropic(
|
||||
prompt: str,
|
||||
max_tokens: int,
|
||||
temperature: float,
|
||||
model: str,
|
||||
) -> dict:
|
||||
"""
|
||||
Generate text using Anthropic Claude.
|
||||
|
||||
NOTE: Uncomment and install anthropic package to use.
|
||||
"""
|
||||
# import anthropic
|
||||
# client = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY)
|
||||
#
|
||||
# response = await client.messages.create(
|
||||
# model=model,
|
||||
# max_tokens=max_tokens,
|
||||
# messages=[{"role": "user", "content": prompt}],
|
||||
# )
|
||||
#
|
||||
# return {
|
||||
# "text": response.content[0].text,
|
||||
# "model": model,
|
||||
# "usage": {
|
||||
# "prompt_tokens": response.usage.input_tokens,
|
||||
# "completion_tokens": response.usage.output_tokens,
|
||||
# },
|
||||
# }
|
||||
|
||||
raise NotImplementedError("Anthropic provider not configured")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# CHAT COMPLETION
|
||||
# =============================================================================
|
||||
|
||||
async def chat_completion(
    messages: list[dict],
    max_tokens: int = 500,
    temperature: float = 0.7,
    model: Optional[str] = None,
) -> dict:
    """
    Generate a chat response from message history.

    Args:
        messages: List of message dicts with 'role' and 'content'
        max_tokens: Maximum tokens in the response
        temperature: Creativity level
        model: Override the default model

    Returns:
        dict with keys: message, model

    Example:
        result = await chat_completion([
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ])
        print(result["message"]["content"])
    """
    chosen_model = model or settings.AI_MODEL

    # NOTE(review): always uses OpenAI, regardless of settings.AI_PROVIDER -
    # unlike generate_text, there is no provider dispatch here.
    completion = await openai_client.chat.completions.create(
        model=chosen_model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
    )

    assistant_reply = completion.choices[0].message.content
    return {
        "message": {"role": "assistant", "content": assistant_reply},
        "model": chosen_model,
    }
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# STREAMING
|
||||
# =============================================================================
|
||||
|
||||
async def stream_text(
    prompt: str,
    max_tokens: int = 500,
    temperature: float = 0.7,
    model: Optional[str] = None,
) -> AsyncGenerator[str, None]:
    """
    Stream generated text token by token.

    This is an async generator that yields text chunks as they're generated.
    Use this for real-time chat interfaces.

    Args:
        prompt: The input text to generate from
        max_tokens: Maximum tokens in the response
        temperature: Creativity level
        model: Override the default model

    Example usage in FastAPI:
        @app.post("/stream")
        async def stream_endpoint(request: Request):
            return StreamingResponse(
                stream_text(request.prompt),
                media_type="text/event-stream",
            )
    """
    model = model or settings.AI_MODEL

    response = await openai_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        temperature=temperature,
        stream=True,  # Enable streaming
    )

    # Yield each text delta as it arrives.
    # Fix: guard against chunks whose `choices` list is empty - the OpenAI
    # streaming API can emit such chunks (e.g. a trailing usage chunk), and
    # indexing [0] on them would raise IndexError mid-stream.
    async for chunk in response:
        if chunk.choices and chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# UTILITY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
async def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
    """
    Estimate how many tokens *text* would consume.

    Useful for:
    - Estimating costs before making API calls
    - Ensuring prompts don't exceed model limits

    Note: This is an approximation. For exact counts, use tiktoken library.
    """
    # ~4 English characters per token is a common rough average.
    chars_per_token = 4
    return len(text) // chars_per_token
|
||||
|
||||
|
||||
def get_model_context_limit(model: str) -> int:
    """
    Return the context window size (in tokens) for *model*.

    Useful for knowing how much text you can send/receive.
    Unknown models fall back to a conservative 4096-token window.
    """
    fallback_limit = 4096
    known_limits = {
        # OpenAI models
        "gpt-4-turbo": 128000,
        "gpt-4": 8192,
        "gpt-3.5-turbo": 16385,
        # Anthropic models
        "claude-3-opus-20240229": 200000,
        "claude-3-sonnet-20240229": 200000,
        "claude-3-haiku-20240307": 200000,
    }
    return known_limits.get(model, fallback_limit)
|
||||
172
backend/services/database.py
Normal file
172
backend/services/database.py
Normal file
@@ -0,0 +1,172 @@
|
||||
"""
|
||||
=============================================================================
|
||||
DATABASE SERVICE (SUPABASE)
|
||||
=============================================================================
|
||||
|
||||
This module provides Supabase database integration.
|
||||
|
||||
Supabase Features Used:
|
||||
- Database: PostgreSQL with auto-generated REST API
|
||||
- Auth: User authentication (handled in frontend mostly)
|
||||
- Real-time: Websocket subscriptions (used in frontend)
|
||||
- Storage: File uploads (not shown here, but easy to add)
|
||||
|
||||
Why Supabase?
|
||||
- Free tier is generous (great for hackathons!)
|
||||
- Built-in auth that works with frontend
|
||||
- Real-time subscriptions out of the box
|
||||
- Full PostgreSQL (can run raw SQL)
|
||||
- Great documentation
|
||||
"""
|
||||
|
||||
from supabase import create_client, Client
|
||||
from config import settings
|
||||
from functools import lru_cache
|
||||
|
||||
|
||||
@lru_cache
def get_supabase_client() -> Client:
    """
    Return a process-wide, lazily created Supabase client.

    lru_cache memoizes the first call, so every caller shares one client
    and we avoid repeated connection setup.

    Note: the backend authenticates with the SERVICE key (admin access,
    not the anon key); the frontend uses the anon key together with
    Row Level Security.

    Raises:
        ValueError: if the Supabase URL or service key is not configured.
    """
    url = settings.SUPABASE_URL
    service_key = settings.SUPABASE_SERVICE_KEY

    if url and service_key:
        return create_client(url, service_key)

    raise ValueError(
        "Supabase not configured. "
        "Set SUPABASE_URL and SUPABASE_SERVICE_KEY in .env"
    )
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# DATABASE HELPERS
|
||||
# =============================================================================
|
||||
|
||||
async def get_user_by_id(user_id: str) -> dict | None:
    """
    Look up a user by ID via the Supabase admin auth API.

    Returns the user object, or None when the lookup yields no response.

    Example:
        user = await get_user_by_id("123-456-789")
        print(user["email"])
    """
    client = get_supabase_client()

    result = client.auth.admin.get_user_by_id(user_id)
    if result:
        return result.user
    return None
|
||||
|
||||
|
||||
async def verify_jwt_token(token: str) -> dict | None:
    """
    Resolve a Supabase access token to its user, or None when invalid.

    Use this to authenticate API requests that include the Supabase
    access token.

    Example:
        @app.get("/protected")
        async def protected_route(authorization: str = Header()):
            token = authorization.replace("Bearer ", "")
            user = await verify_jwt_token(token)
            if not user:
                raise HTTPException(status_code=401)
    """
    client = get_supabase_client()

    try:
        # Any auth failure (expired, malformed, revoked) maps to None.
        result = client.auth.get_user(token)
        return result.user if result else None
    except Exception:
        return None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# GENERIC CRUD HELPERS
|
||||
# =============================================================================
|
||||
|
||||
class DatabaseTable:
    """
    Helper class for common CRUD operations on a Supabase table.

    Example:
        projects = DatabaseTable("projects")
        all_projects = await projects.get_all()
        project = await projects.get_by_id("123")
        new_project = await projects.create({"name": "My Project"})
    """

    def __init__(self, table_name: str):
        # Name of the Postgres table this helper operates on.
        self.table_name = table_name
        # Shared (cached) Supabase client with service-role access.
        self.client = get_supabase_client()

    async def get_all(self, filters: dict | None = None) -> list[dict]:
        """Get all records, optionally filtered.

        Args:
            filters: Optional mapping of column -> value; each entry adds
                an equality (`eq`) filter to the query.
        """
        query = self.client.table(self.table_name).select("*")

        if filters:
            for key, value in filters.items():
                query = query.eq(key, value)

        response = query.execute()
        return response.data

    async def get_by_id(self, record_id: str) -> dict | None:
        """Get a single record by ID."""
        response = (
            self.client.table(self.table_name)
            .select("*")
            .eq("id", record_id)
            .single()
            .execute()
        )
        return response.data

    async def create(self, data: dict) -> dict:
        """Create a new record and return the inserted row."""
        response = (
            self.client.table(self.table_name)
            .insert(data)
            .execute()
        )
        return response.data[0]

    async def update(self, record_id: str, data: dict) -> dict | None:
        """Update an existing record.

        Returns:
            The updated row, or None when no row matched *record_id*.
        """
        response = (
            self.client.table(self.table_name)
            .update(data)
            .eq("id", record_id)
            .execute()
        )
        return response.data[0] if response.data else None

    async def delete(self, record_id: str) -> bool:
        """Delete a record by ID. Always returns True (no existence check)."""
        self.client.table(self.table_name).delete().eq("id", record_id).execute()
        return True
|
||||
|
||||
|
||||
# =============================================================================
# EXAMPLE: Projects Table Helper
# =============================================================================

# Module-level helper bound to the "projects" table.
# NOTE: this runs at import time; DatabaseTable.__init__ calls
# get_supabase_client(), which raises ValueError when SUPABASE_URL /
# SUPABASE_SERVICE_KEY are unset — so importing this module requires a
# configured environment.
projects_db = DatabaseTable("projects")

# You can now use:
# await projects_db.get_all()
# await projects_db.get_by_id("123")
# await projects_db.create({"name": "Test", "user_id": "abc"})
# await projects_db.update("123", {"name": "Updated"})
# await projects_db.delete("123")
|
||||
Reference in New Issue
Block a user