This commit is contained in:
el 2025-04-01 16:33:54 +02:00
commit d05799fe65
60 changed files with 7078 additions and 0 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

7
server/app/api/api.py Normal file
View file

@ -0,0 +1,7 @@
from fastapi import APIRouter
from app.api.endpoints import containers, logs

# Top-level API router aggregating every endpoint module.
api_router = APIRouter()

# Mount each endpoint router under its URL prefix / OpenAPI tag.
for _endpoint_router, _prefix, _tag in (
    (containers.router, "/containers", "containers"),
    (logs.router, "/logs", "logs"),
):
    api_router.include_router(_endpoint_router, prefix=_prefix, tags=[_tag])

View file

@ -0,0 +1,31 @@
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from app.services.logs_service import logs_service
import asyncio

router = APIRouter()


@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Relay every text frame received from a client to all log listeners."""
    await logs_service.connect(websocket)
    try:
        while True:
            message = await websocket.receive_text()
            # NOTE: per-client log filtering preferences could be applied here.
            await logs_service.broadcast_log(message)
    except WebSocketDisconnect:
        logs_service.disconnect(websocket)
@router.websocket("/ws/{container_id}")
async def container_logs_websocket(websocket: WebSocket, container_id: str):
    """Stream the logs of one specific container to the connected client.

    Fix: deregistration now happens in ``finally`` — previously, if sending
    the error message itself raised (e.g. the socket was already closed),
    ``logs_service.disconnect`` was never called and the connection leaked.
    """
    await logs_service.connect(websocket, container_id)
    try:
        async for log in logs_service.get_container_logs(container_id):
            await websocket.send_text(log)
    except WebSocketDisconnect:
        pass  # client went away; cleanup happens in finally
    except Exception as e:
        # Best-effort error report: the socket may already be unusable.
        try:
            await websocket.send_text(f"Error: {str(e)}")
        except Exception:
            pass
    finally:
        # Guarantee the connection is deregistered exactly once.
        logs_service.disconnect(websocket, container_id)

37
server/app/auth.py Normal file
View file

@ -0,0 +1,37 @@
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from datetime import datetime, timedelta
from typing import Optional

# TODO: Move these settings into a configuration file.
# SECURITY: the signing key is hard-coded and committed to the repository;
# it must be loaded from the environment/secret store before any deployment.
SECRET_KEY = "votre_clé_secrète_ici"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30  # default access-token lifetime

oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Build a signed JWT carrying ``data`` plus an ``exp`` claim.

    Args:
        data: Claims to embed in the token (e.g. ``{"sub": username}``).
        expires_delta: Optional custom lifetime. When omitted, the
            module-wide ``ACCESS_TOKEN_EXPIRE_MINUTES`` setting is used.

    Returns:
        The encoded JWT string.
    """
    to_encode = data.copy()
    if expires_delta:
        expire = datetime.utcnow() + expires_delta
    else:
        # Fix: honour the configured ACCESS_TOKEN_EXPIRE_MINUTES constant,
        # which was declared above but never used — the default lifetime was
        # silently hard-coded to 15 minutes instead.
        expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    to_encode.update({"exp": expire})
    return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """Resolve the current username from a bearer JWT, raising 401 on failure."""
    auth_error = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Impossible de valider les identifiants",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        claims = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    except JWTError:
        raise auth_error
    # The "sub" claim carries the username set at token creation.
    username = claims.get("sub")
    if username is None:
        raise auth_error
    return username

77
server/app/main.py Normal file
View file

@ -0,0 +1,77 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.websockets import WebSocket
import uvicorn
import logging
import asyncio
from app.routes import containers, agents
from app.services.agent import AgentService
# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="Étoile Polaire",
    description="API de gestion des conteneurs Docker et des agents",
    version="1.0.0"
)

# CORS configuration: only the local frontend origin may call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],  # Frontend URL
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Route registration
app.include_router(containers.router)
app.include_router(agents.router)

# Shared agent service instance, used by the background cleanup loop below.
agent_service = AgentService()
@app.get("/")
async def root():
    """API landing endpoint."""
    welcome = {"message": "Bienvenue sur l'API Étoile Polaire"}
    return welcome
@app.get("/health")
async def health_check():
    """Liveness probe for monitoring/orchestration."""
    payload = {"status": "healthy"}
    return payload
@app.on_event("startup")
async def startup_event():
    """Tasks to run at application startup."""
    # Fix: keep a strong reference to the background task. asyncio only
    # holds weak references to tasks, so an unreferenced task created with
    # create_task() can be garbage-collected mid-flight. Stored on
    # app.state so it can be inspected or cancelled later.
    app.state.cleanup_task = asyncio.create_task(cleanup_inactive_agents())
async def cleanup_inactive_agents():
    """Periodically remove agents that have stopped reporting in."""
    while True:
        try:
            inactive_agents = await agent_service.cleanup_inactive_agents()
            if inactive_agents:
                logger.info(f"Agents inactifs supprimés : {inactive_agents}")
        except Exception as e:
            # Never let an error kill the background loop; log and retry.
            logger.error(f"Erreur lors du nettoyage des agents : {e}")
        await asyncio.sleep(300)  # check every 5 minutes
# WebSocket for real-time logs
@app.websocket("/ws/logs")
async def websocket_endpoint(websocket: WebSocket):
    """Placeholder log-streaming WebSocket: currently echoes received text."""
    await websocket.accept()
    try:
        while True:
            # TODO: Implement the real log streaming logic
            data = await websocket.receive_text()
            await websocket.send_text(f"Message reçu: {data}")
    except Exception as e:
        logger.error(f"Erreur WebSocket: {e}")
    finally:
        # Fix: closing a socket the client already disconnected raises a
        # RuntimeError in Starlette; the unconditional close() here used to
        # propagate that out of the handler on every client disconnect.
        try:
            await websocket.close()
        except Exception:
            pass
if __name__ == "__main__":
    # NOTE(review): "main:app" assumes the process is started from server/app/;
    # from the repository root the import string would be "app.main:app" — confirm.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)

56
server/app/models.py Normal file
View file

@ -0,0 +1,56 @@
from pydantic import BaseModel, Field
from typing import List, Optional, Dict
from datetime import datetime
from enum import Enum
class ContainerStatus(str, Enum):
    """Lifecycle states a Docker container can report."""
    RUNNING = "running"
    STOPPED = "stopped"
    PAUSED = "paused"
    RESTARTING = "restarting"
    REMOVED = "removed"
    DEAD = "dead"
    CREATED = "created"
class Container(BaseModel):
    """Snapshot of a Docker container plus optional runtime metrics."""
    id: str
    name: str
    image: str
    status: ContainerStatus
    created: datetime
    # Port mapping as reported by Docker: "port/proto" -> host binding dicts.
    ports: Dict[str, List[Dict[str, str]]] = Field(default_factory=dict)
    labels: Dict[str, str] = Field(default_factory=dict)
    networks: List[str] = Field(default_factory=list)
    volumes: List[str] = Field(default_factory=list)
    env_vars: Dict[str, str] = Field(default_factory=dict)
    restart_policy: Optional[str] = None
    # Live metrics; None when statistics have not been collected.
    cpu_usage: Optional[float] = None
    memory_usage: Optional[int] = None
    network_usage: Optional[Dict[str, int]] = None
class Agent(BaseModel):
    """A remote host running the agent, with its last known container list."""
    id: str
    name: str
    hostname: str
    ip_address: str
    docker_version: str
    status: str
    # Timestamp of the last heartbeat; used to detect inactive agents.
    last_seen: datetime
    containers: List[Container] = Field(default_factory=list)
class LogEntry(BaseModel):
    """A single log line emitted by a container."""
    container_id: str
    timestamp: datetime
    message: str
    stream: str  # "stdout" or "stderr"
class ContainerUpdate(BaseModel):
    """A lifecycle action requested on a container."""
    container_id: str
    action: str  # one of: start, stop, restart, update
    # Target agent; None means the local Docker daemon.
    agent_id: Optional[str] = None
class AgentRegistration(BaseModel):
    """Payload sent by an agent when it registers with the server."""
    name: str
    hostname: str
    ip_address: str
    docker_version: str

View file

@ -0,0 +1,7 @@
# Re-export the public schema models so callers can import from app.models.
from .container import Container, ContainerLog, ContainerUpdate, ContainerStats
from .agent import Agent, AgentRegistration

__all__ = [
    'Container', 'ContainerLog', 'ContainerUpdate', 'ContainerStats',
    'Agent', 'AgentRegistration'
]

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,54 @@
from pydantic import BaseModel
from typing import Optional, Dict, List
from datetime import datetime
from .container import Container
class AgentBase(BaseModel):
    """Connection details shared by all agent schemas."""
    name: str
    host: str
    port: int
    token: str  # shared secret used to authenticate against the agent
class AgentCreate(AgentBase):
    """Payload for registering a new agent (same fields as AgentBase)."""
    pass
class AgentUpdate(BaseModel):
    """Partial update payload; only explicitly set fields are applied."""
    name: Optional[str] = None
    host: Optional[str] = None
    port: Optional[int] = None
    token: Optional[str] = None
class Agent(AgentBase):
    """Full agent record as stored and returned by the API."""
    id: str
    status: str
    version: str
    last_seen: datetime
    created_at: datetime
    updated_at: datetime
    containers: List[Container]

    class Config:
        # Allow building this model directly from attribute/ORM objects.
        from_attributes = True
class AgentStatus(BaseModel):
    """Health/status summary reported for an agent."""
    status: str
    version: str
    last_seen: datetime
    containers_count: int
    system_info: Dict[str, str]  # e.g. os, arch, docker_version
class AgentRegistration(BaseModel):
    """Self-registration payload sent by an agent to the server."""
    name: str
    hostname: str
    ip_address: str
    docker_version: str

# Fix: a second `class Agent(BaseModel)` was previously declared here and
# silently shadowed the richer Agent model defined above (host/port/token
# and the created_at/updated_at timestamps), so records built by
# AgentService.create_agent() failed validation against the shadowing
# definition. The duplicate has been removed; the Agent declared earlier
# in this module remains the canonical model.

View file

@ -0,0 +1,54 @@
from pydantic import BaseModel
from typing import Dict, List, Optional, Any
from datetime import datetime
class PortBinding(BaseModel):
    """One published port: a container port exposed on a host interface."""
    host_ip: str = "0.0.0.0"
    host_port: int
    container_port: int
    protocol: str = "tcp"
class NetworkStats(BaseModel):
    """Per-interface network counters (bytes, packets, errors, drops)."""
    rx_bytes: int
    rx_packets: int
    rx_errors: int
    rx_dropped: int
    tx_bytes: int
    tx_packets: int
    tx_errors: int
    tx_dropped: int
class ContainerStats(BaseModel):
    """Point-in-time resource usage for a container."""
    cpu_percent: float
    memory_usage: int
    memory_limit: int
    # Counters keyed by network interface name.
    network: Dict[str, NetworkStats]
    block_read: int
    block_write: int
class Container(BaseModel):
    """Container description built from the Docker API `attrs` payload."""
    id: str
    name: str
    status: str
    image: str
    created: str  # creation timestamp as returned by Docker (string form)
    ports: Optional[List[PortBinding]] = None
    volumes: Optional[List[str]] = None
    environment: Optional[Dict[str, str]] = None
    networks: Optional[List[str]] = None
    # Only populated for containers that define a HEALTHCHECK.
    health_status: Optional[str] = None
    restart_policy: Optional[str] = None
    command: Optional[str] = None
class ContainerUpdate(BaseModel):
    """Requested configuration changes for a container; all fields optional."""
    image: Optional[str] = None
    command: Optional[str] = None
    environment: Optional[Dict[str, str]] = None
    ports: Optional[List[PortBinding]] = None
    volumes: Optional[List[str]] = None
    restart_policy: Optional[str] = None
class ContainerLog(BaseModel):
    """A single log line with its source stream."""
    timestamp: datetime
    message: str
    stream: str = "stdout"  # "stdout" or "stderr"

View file

@ -0,0 +1,4 @@
# Route modules exposed by the application.
from . import containers
from . import agents

__all__ = ['containers', 'agents']

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,75 @@
from fastapi import APIRouter, HTTPException, Depends
from typing import List
from ..models.agent import Agent, AgentCreate, AgentUpdate
from ..services.agent_service import AgentService
from ..auth import get_current_user

# Every endpoint below requires an authenticated user via get_current_user.
router = APIRouter(prefix="/api/agents", tags=["agents"])
@router.get("/", response_model=List[Agent])
async def list_agents(current_user: str = Depends(get_current_user)):
    """Return every registered agent."""
    try:
        service = AgentService()
        return await service.list_agents()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/", response_model=Agent)
async def create_agent(agent: AgentCreate, current_user: str = Depends(get_current_user)):
    """Register a new agent and return the created record."""
    try:
        service = AgentService()
        return await service.create_agent(agent)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{agent_id}", response_model=Agent)
async def get_agent(agent_id: str, current_user: str = Depends(get_current_user)):
    """Return one agent by ID, or 404 if it does not exist."""
    try:
        agent_service = AgentService()
        agent = await agent_service.get_agent(agent_id)
        if not agent:
            raise HTTPException(status_code=404, detail="Agent non trouvé")
        return agent
    except HTTPException:
        # Fix: let the deliberate 404 through — the broad handler below used
        # to catch it and rewrap it as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.put("/{agent_id}", response_model=Agent)
async def update_agent(agent_id: str, agent: AgentUpdate, current_user: str = Depends(get_current_user)):
    """Update an agent; 404 if it does not exist."""
    try:
        agent_service = AgentService()
        updated_agent = await agent_service.update_agent(agent_id, agent)
        if not updated_agent:
            raise HTTPException(status_code=404, detail="Agent non trouvé")
        return updated_agent
    except HTTPException:
        # Fix: let the deliberate 404 through — the broad handler below used
        # to catch it and rewrap it as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.delete("/{agent_id}")
async def delete_agent(agent_id: str, current_user: str = Depends(get_current_user)):
    """Delete an agent; 404 if it does not exist."""
    try:
        agent_service = AgentService()
        success = await agent_service.delete_agent(agent_id)
        if not success:
            raise HTTPException(status_code=404, detail="Agent non trouvé")
        return {"message": "Agent supprimé avec succès"}
    except HTTPException:
        # Fix: let the deliberate 404 through — the broad handler below used
        # to catch it and rewrap it as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{agent_id}/status")
async def get_agent_status(agent_id: str, current_user: str = Depends(get_current_user)):
    """Return an agent's status summary; 404 if it does not exist."""
    try:
        agent_service = AgentService()
        status = await agent_service.get_agent_status(agent_id)
        if not status:
            raise HTTPException(status_code=404, detail="Agent non trouvé")
        return status
    except HTTPException:
        # Fix: let the deliberate 404 through — the broad handler below used
        # to catch it and rewrap it as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

View file

@ -0,0 +1,270 @@
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
from typing import List, Dict, Any, Set
from ..models.container import Container, ContainerUpdate, ContainerStats
from ..services.docker import DockerService
from ..services.redis import RedisService
import asyncio
import logging
import docker
from weakref import WeakSet
from contextlib import asynccontextmanager
logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/containers", tags=["containers"])

# Module-level service singletons shared by all request handlers.
docker_service = DockerService()
redis_service = RedisService()

# Active WebSocket connections per container, each mapped to the asyncio
# task streaming that container's logs (used for cancellation and cleanup).
active_connections: Dict[str, Dict[WebSocket, asyncio.Task]] = {}
async def cleanup_connection(container_id: str, websocket: WebSocket):
    """Tear down one connection: cancel its log task, deregister it, close the socket."""
    try:
        if container_id in active_connections and websocket in active_connections[container_id]:
            # Cancel the log-streaming task bound to this connection, if any.
            task = active_connections[container_id][websocket]
            if not task.done():
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
            # Remove this connection from the registry.
            del active_connections[container_id][websocket]
            # Drop the container entry once no connections remain.
            if not active_connections[container_id]:
                del active_connections[container_id]
        # Close the WebSocket connection (it may already be closed client-side;
        # any resulting error is caught and logged below).
        await websocket.close()
    except Exception as e:
        logger.error(f"Erreur lors du nettoyage de la connexion pour {container_id}: {e}")
@asynccontextmanager
async def manage_websocket_connection(websocket: WebSocket, container_id: str):
    """Own a WebSocket's lifecycle: accept on enter, full cleanup on exit."""
    try:
        await websocket.accept()
        # Ensure a registry slot exists for this container.
        if container_id not in active_connections:
            active_connections[container_id] = {}
        yield
    finally:
        # Always cancel the log task, deregister, and close the socket.
        await cleanup_connection(container_id, websocket)
@router.get("/", response_model=List[Container])
async def list_containers():
    """List every Docker container known to the daemon."""
    try:
        logger.info("Début de la récupération de la liste des conteneurs")
        containers = await docker_service.list_containers()
        if containers:
            logger.info(f"Conteneurs trouvés avec succès : {len(containers)}")
            return containers
        logger.warning("Aucun conteneur trouvé")
        return []
    except docker.errors.APIError as e:
        logger.error(f"Erreur API Docker lors de la récupération des conteneurs : {e}")
        raise HTTPException(status_code=500, detail=f"Erreur Docker: {str(e)}")
    except Exception as e:
        logger.error(f"Erreur inattendue lors de la récupération des conteneurs : {e}")
        raise HTTPException(status_code=500, detail=str(e))
def validate_container_id(container_id: str) -> None:
    """Reject empty/missing or literal "undefined" container IDs with a 400."""
    is_invalid = (not container_id) or container_id == "undefined"
    if is_invalid:
        logger.error("ID de conteneur invalide ou undefined")
        raise HTTPException(status_code=400, detail="ID de conteneur invalide")
@router.get("/{container_id}", response_model=Container)
async def get_container(container_id: str):
    """Fetch details for one container; 404 if it cannot be retrieved."""
    validate_container_id(container_id)
    try:
        return await docker_service.get_container(container_id)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Erreur lors de la récupération du conteneur {container_id} : {e}")
        raise HTTPException(status_code=404, detail="Conteneur non trouvé")
@router.get("/{container_id}/logs")
async def get_container_logs(container_id: str, tail: int = 100):
    """Return up to `tail` recent log lines for a container."""
    validate_container_id(container_id)
    try:
        logger.info(f"Requête de logs pour le conteneur {container_id}")
        logs = await docker_service.get_container_logs(container_id, tail=tail)
        if logs:
            logger.info(f"Logs récupérés avec succès pour le conteneur {container_id}")
            return {"logs": logs}
        logger.warning(f"Aucun log disponible pour le conteneur {container_id}")
        return {"message": "Aucun log disponible pour ce conteneur"}
    except HTTPException:
        raise
    except docker.errors.NotFound:
        logger.error(f"Conteneur {container_id} non trouvé")
        raise HTTPException(status_code=404, detail="Conteneur non trouvé")
    except docker.errors.APIError as e:
        logger.error(f"Erreur API Docker pour le conteneur {container_id}: {e}")
        raise HTTPException(status_code=500, detail=f"Erreur Docker: {str(e)}")
    except Exception as e:
        logger.error(f"Erreur lors de la récupération des logs du conteneur {container_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{container_id}/stats")
async def get_container_stats(container_id: str):
    """Return live resource statistics for a container."""
    validate_container_id(container_id)
    try:
        logger.info(f"Récupération des statistiques pour le conteneur {container_id}")
        return await docker_service.get_container_stats(container_id)
    except HTTPException:
        raise
    except docker.errors.NotFound:
        logger.error(f"Conteneur {container_id} non trouvé")
        raise HTTPException(status_code=404, detail=f"Conteneur {container_id} non trouvé")
    except Exception as e:
        logger.error(f"Erreur lors de la récupération des stats du conteneur {container_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/{container_id}/start")
async def start_container(container_id: str):
    """Start a container."""
    validate_container_id(container_id)
    try:
        await docker_service.start_container(container_id)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Erreur lors du démarrage du conteneur {container_id} : {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "Conteneur démarré avec succès"}
@router.post("/{container_id}/stop")
async def stop_container(container_id: str):
    """Stop a container."""
    validate_container_id(container_id)
    try:
        await docker_service.stop_container(container_id)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Erreur lors de l'arrêt du conteneur {container_id} : {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "Conteneur arrêté avec succès"}
@router.post("/{container_id}/restart")
async def restart_container(container_id: str):
    """Restart a container."""
    validate_container_id(container_id)
    try:
        await docker_service.restart_container(container_id)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Erreur lors du redémarrage du conteneur {container_id} : {e}")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "Conteneur redémarré avec succès"}
@router.websocket("/{container_id}/logs/ws")
async def websocket_logs(websocket: WebSocket, container_id: str):
    """WebSocket streaming a container's logs in real time.

    Fix: the two bare ``except:`` clauses were narrowed to
    ``except Exception:`` so they no longer swallow KeyboardInterrupt /
    SystemExit / CancelledError.
    """
    if not container_id or container_id == "undefined":
        logger.error("ID de conteneur invalide ou undefined")
        # NOTE(review): the socket has not been accepted at this point, so
        # this send likely fails — consider accept() + close(code) instead.
        try:
            await websocket.send_json({"error": True, "message": "ID de conteneur invalide"})
        except Exception:
            pass
        return
    async with manage_websocket_connection(websocket, container_id):
        try:
            # Spawn the log-following task and register it for cleanup.
            task = asyncio.create_task(handle_container_logs(websocket, container_id))
            active_connections[container_id][websocket] = task
            # Wait until the task finishes or the client disconnects.
            try:
                await task
            except asyncio.CancelledError:
                logger.info(f"Tâche de suivi des logs annulée pour {container_id}")
        except WebSocketDisconnect:
            logger.info(f"Client WebSocket déconnecté pour le conteneur {container_id}")
        except Exception as e:
            logger.error(f"Erreur WebSocket pour le conteneur {container_id}: {e}")
            try:
                await websocket.send_json({"error": True, "message": str(e)})
            except Exception:
                pass
async def handle_container_logs(websocket: WebSocket, container_id: str):
    """Follow a container's log stream and push batched entries over the WebSocket.

    Logs are buffered and flushed when either BUFFER_SIZE entries accumulate
    or FLUSH_INTERVAL seconds have elapsed since the previous send.
    """
    try:
        # Buffer accumulating log lines between sends.
        log_buffer = []
        last_send_time = asyncio.get_event_loop().time()
        BUFFER_SIZE = 100  # maximum log lines held before a forced flush
        FLUSH_INTERVAL = 0.1  # minimum interval between sends (100 ms)
        logger.info(f"Démarrage du suivi des logs pour le conteneur {container_id}")
        async for log in docker_service.follow_container_logs(container_id):
            # Stop if this connection was deregistered (client gone).
            if container_id not in active_connections or websocket not in active_connections[container_id]:
                logger.info(f"Connexion WebSocket fermée pour le conteneur {container_id}")
                break
            log_buffer.append(log)
            current_time = asyncio.get_event_loop().time()
            # Flush when the buffer is full or enough time has elapsed.
            if len(log_buffer) >= BUFFER_SIZE or (current_time - last_send_time) >= FLUSH_INTERVAL:
                if log_buffer:
                    try:
                        await websocket.send_json({"logs": log_buffer})
                        logger.debug(f"Envoi de {len(log_buffer)} logs pour le conteneur {container_id}")
                    except WebSocketDisconnect:
                        logger.info(f"Client WebSocket déconnecté pendant l'envoi des logs pour {container_id}")
                        break
                    except Exception as e:
                        logger.error(f"Erreur lors de l'envoi des logs pour {container_id}: {e}")
                        break
                log_buffer = []
                last_send_time = current_time
        # Send whatever is still buffered once the stream ends.
        if log_buffer:
            try:
                await websocket.send_json({"logs": log_buffer})
                logger.debug(f"Envoi des {len(log_buffer)} logs restants pour le conteneur {container_id}")
            except WebSocketDisconnect:
                logger.info(f"Client WebSocket déconnecté pendant l'envoi des logs restants pour {container_id}")
            except Exception as e:
                logger.error(f"Erreur lors de l'envoi des logs restants pour {container_id}: {e}")
    except asyncio.CancelledError:
        # Re-raise so task cancellation propagates correctly.
        logger.info(f"Suivi des logs annulé pour le conteneur {container_id}")
        raise
    except Exception as e:
        logger.error(f"Erreur lors du suivi des logs pour {container_id}: {e}")
        raise

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,74 @@
import uuid
from typing import List, Optional, Dict
from datetime import datetime, timedelta
from ..models import Agent, AgentRegistration
from .docker import DockerService
class AgentService:
    """In-memory registry of remote agents and their containers."""

    def __init__(self):
        # agent_id -> Agent; nothing is persisted, resets on restart.
        self.agents: Dict[str, Agent] = {}
        self.docker_service = DockerService()

    async def register_agent(self, registration: AgentRegistration) -> Agent:
        """Register a new agent and return its record."""
        new_agent = Agent(
            id=str(uuid.uuid4()),
            name=registration.name,
            hostname=registration.hostname,
            ip_address=registration.ip_address,
            docker_version=registration.docker_version,
            status="active",
            last_seen=datetime.utcnow(),
            containers=[],
        )
        self.agents[new_agent.id] = new_agent
        return new_agent

    async def update_agent_status(self, agent_id: str) -> Optional[Agent]:
        """Mark an agent active and refresh its last-seen timestamp."""
        agent = self.agents.get(agent_id)
        if agent is None:
            return None
        agent.last_seen = datetime.utcnow()
        agent.status = "active"
        return agent

    async def get_agent(self, agent_id: str) -> Optional[Agent]:
        """Look up an agent by ID, or None if unknown."""
        return self.agents.get(agent_id)

    async def list_agents(self) -> List[Agent]:
        """Return every registered agent."""
        return list(self.agents.values())

    async def remove_agent(self, agent_id: str) -> bool:
        """Remove an agent; True if it existed."""
        return self.agents.pop(agent_id, None) is not None

    async def cleanup_inactive_agents(self, timeout_minutes: int = 5) -> List[str]:
        """Drop agents not seen within the timeout and return their IDs."""
        cutoff = datetime.utcnow() - timedelta(minutes=timeout_minutes)
        stale_ids = [
            agent_id
            for agent_id, agent in self.agents.items()
            if agent.last_seen < cutoff
        ]
        for agent_id in stale_ids:
            await self.remove_agent(agent_id)
        return stale_ids

    async def update_agent_containers(self, agent_id: str) -> Optional[Agent]:
        """Refresh an agent's container list from the local Docker daemon."""
        agent = self.agents.get(agent_id)
        if agent is None:
            return None
        agent.containers = await self.docker_service.list_containers()
        agent.last_seen = datetime.utcnow()
        return agent

View file

@ -0,0 +1,75 @@
import uuid
from typing import List, Optional
from datetime import datetime
from ..models.agent import Agent, AgentCreate, AgentUpdate, AgentStatus
class AgentService:
    """In-memory CRUD service for agents (no persistence)."""

    def __init__(self):
        # TODO: Replace with a real database.
        self.agents: List[Agent] = []

    async def list_agents(self) -> List[Agent]:
        """Return every agent."""
        return self.agents

    async def create_agent(self, agent: AgentCreate) -> Agent:
        """Create an agent with a generated ID; starts offline with no containers."""
        new_agent = Agent(
            id=str(uuid.uuid4()),
            name=agent.name,
            host=agent.host,
            port=agent.port,
            token=agent.token,
            status="offline",
            version="1.0.0",
            last_seen=datetime.now(),
            created_at=datetime.now(),
            updated_at=datetime.now(),
            # Fix: `containers` is a required field of the Agent schema
            # (declared without a default); omitting it made every
            # create_agent() call fail validation.
            containers=[]
        )
        self.agents.append(new_agent)
        return new_agent

    async def get_agent(self, agent_id: str) -> Optional[Agent]:
        """Return the agent with this ID, or None."""
        return next((agent for agent in self.agents if agent.id == agent_id), None)

    async def update_agent(self, agent_id: str, agent_update: AgentUpdate) -> Optional[Agent]:
        """Apply the explicitly-set fields of `agent_update` to an existing agent."""
        agent = await self.get_agent(agent_id)
        if not agent:
            return None
        # NOTE(review): .dict() is the pydantic v1 API; under pydantic v2 this
        # should be model_dump(exclude_unset=True) — confirm installed version.
        update_data = agent_update.dict(exclude_unset=True)
        for key, value in update_data.items():
            setattr(agent, key, value)
        agent.updated_at = datetime.now()
        return agent

    async def delete_agent(self, agent_id: str) -> bool:
        """Remove an agent; True if it existed."""
        agent = await self.get_agent(agent_id)
        if not agent:
            return False
        self.agents.remove(agent)
        return True

    async def get_agent_status(self, agent_id: str) -> Optional[AgentStatus]:
        """Return a (currently stubbed) status summary for an agent."""
        agent = await self.get_agent(agent_id)
        if not agent:
            return None
        # TODO: Implement a real health check against the agent.
        return AgentStatus(
            status=agent.status,
            version=agent.version,
            last_seen=agent.last_seen,
            containers_count=0,
            system_info={
                "os": "Linux",
                "arch": "x86_64",
                "docker_version": "20.10.0"
            }
        )

View file

@ -0,0 +1,387 @@
import docker
from typing import List, Dict, Any, Optional, AsyncGenerator
import asyncio
import logging
from ..models.container import Container, ContainerStats, NetworkStats, PortBinding
logger = logging.getLogger(__name__)
class DockerService:
def __init__(self):
    """Initialise the service and eagerly connect to the Docker daemon."""
    self.client = None
    # Raises if the Docker daemon is unreachable at construction time.
    self.init_client()
def init_client(self):
    """Create (or reuse) the Docker client, verifying it with a ping.

    Raises:
        Exception: propagated when a fresh client cannot reach the daemon.
    """
    try:
        if self.client:
            try:
                # Probe the existing client before replacing it.
                self.client.ping()
                logger.info("Client Docker existant est valide")
                return
            # Fix: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            except Exception:
                logger.warning("Client Docker existant n'est plus valide")
                self.client = None
        self.client = docker.from_env()
        self.client.ping()  # fail fast if the daemon is unreachable
        logger.info("Nouveau client Docker initialisé avec succès")
    except Exception as e:
        logger.error(f"Erreur lors de l'initialisation du client Docker: {e}")
        raise
def ensure_client(self):
    """Guarantee a usable Docker client, reinitialising it when missing or stale."""
    try:
        if not self.client:
            logger.warning("Client Docker non initialisé, tentative de réinitialisation")
            self.init_client()
            return
        # Cheap liveness probe on the existing client.
        self.client.ping()
    except Exception as e:
        # Either the probe failed or the first init attempt raised; retry once.
        logger.warning(f"Client Docker invalide, tentative de réinitialisation: {e}")
        self.init_client()
def _parse_port_bindings(self, ports: Dict) -> List[PortBinding]:
    """Translate the Docker API port mapping into PortBinding models."""
    if not ports:
        return []
    bindings: List[PortBinding] = []
    for container_port, host_bindings in ports.items():
        if not host_bindings:
            continue
        # Keys look like "8080/tcp".
        port, proto = container_port.split("/")
        bindings.extend(
            PortBinding(
                host_ip=entry.get("HostIp", "0.0.0.0"),
                host_port=int(entry.get("HostPort", port)),
                container_port=int(port),
                protocol=proto,
            )
            for entry in host_bindings
        )
    return bindings
def _parse_environment(self, env: List[str]) -> Dict[str, str]:
    """Split "KEY=VALUE" entries into a dict, skipping malformed items."""
    if not env:
        return {}
    # Only the first "=" separates key from value; later ones stay in the value.
    pairs = (item.split("=", 1) for item in env if "=" in item)
    return {key: value for key, value in pairs}
async def list_containers(self) -> List[Container]:
    """Return all containers (running and stopped) as Container models.

    Containers that fail to convert are logged and skipped rather than
    failing the whole listing.
    """
    try:
        self.ensure_client()
        logger.info("Début de la récupération de la liste des conteneurs")
        containers = self.client.containers.list(all=True)
        logger.info(f"Nombre de conteneurs trouvés: {len(containers)}")
        result = []
        for container in containers:
            try:
                attrs = container.attrs
                container_model = Container(
                    id=container.id,
                    name=container.name,
                    status=container.status,
                    # First tag when the image is tagged; "none" otherwise.
                    image=container.image.tags[0] if container.image.tags else "none",
                    created=attrs['Created'],
                    ports=self._parse_port_bindings(attrs['NetworkSettings']['Ports']),
                    volumes=[v['Source'] for v in attrs['Mounts']],
                    environment=self._parse_environment(attrs['Config']['Env']),
                    networks=list(attrs['NetworkSettings']['Networks'].keys()),
                    # Only present for containers that define a HEALTHCHECK.
                    health_status=attrs.get('State', {}).get('Health', {}).get('Status'),
                    restart_policy=attrs['HostConfig']['RestartPolicy']['Name'],
                    command=attrs['Config']['Cmd'][0] if attrs['Config']['Cmd'] else None
                )
                result.append(container_model)
                logger.debug(f"Conteneur ajouté: {container.name}")
            except Exception as e:
                logger.error(f"Erreur lors de la conversion du conteneur {container.name}: {e}")
                continue
        return result
    except Exception as e:
        logger.error(f"Erreur lors de la liste des conteneurs: {e}")
        raise
async def get_container(self, container_id: str) -> Container:
    """Fetch one container by ID and convert it to a Container model.

    Errors (e.g. docker.errors.NotFound) propagate to the caller, which maps
    them to HTTP responses.
    """
    self.ensure_client()
    container = self.client.containers.get(container_id)
    attrs = container.attrs
    return Container(
        id=container.id,
        name=container.name,
        status=container.status,
        # First tag when the image is tagged; "none" otherwise.
        image=container.image.tags[0] if container.image.tags else "none",
        created=attrs['Created'],
        ports=self._parse_port_bindings(attrs['NetworkSettings']['Ports']),
        volumes=[v['Source'] for v in attrs['Mounts']],
        environment=self._parse_environment(attrs['Config']['Env']),
        networks=list(attrs['NetworkSettings']['Networks'].keys()),
        # Only present for containers that define a HEALTHCHECK.
        health_status=attrs.get('State', {}).get('Health', {}).get('Status'),
        restart_policy=attrs['HostConfig']['RestartPolicy']['Name'],
        command=attrs['Config']['Cmd'][0] if attrs['Config']['Cmd'] else None
    )
async def get_container_logs(self, container_id: str, tail: int = 100) -> List[Dict[str, str]]:
    """Return up to `tail` log lines as dicts with timestamp/message/stream.

    Returns an empty list when the container is unknown or has no logs.
    """
    try:
        self.ensure_client()
        container = self.client.containers.get(container_id)
        if not container:
            logger.error(f"Conteneur {container_id} non trouvé")
            return []
        logger.info(f"Récupération des logs pour le conteneur {container_id}")
        # Fetch both stdout and stderr, each line prefixed with a timestamp.
        logs = container.logs(
            tail=tail,
            timestamps=True,
            stdout=True,
            stderr=True
        )
        if not logs:
            logger.warning(f"Aucun log trouvé pour le conteneur {container_id}")
            return []
        decoded_logs = logs.decode('utf-8')
        log_lines = []
        for line in decoded_logs.split('\n'):
            if not line.strip():
                continue
            # Expected format: "2024-04-01T11:10:27.000000000Z message..."
            try:
                timestamp, message = line.split(' ', 1)
                log_lines.append({
                    "timestamp": timestamp,
                    "message": message.strip(),
                    # NOTE(review): the combined logs() call does not expose
                    # which stream a line came from, so "stdout" is assumed.
                    "stream": "stdout"
                })
            except ValueError:
                # Unexpected format: keep the full line with no timestamp.
                log_lines.append({
                    "timestamp": "",
                    "message": line.strip(),
                    "stream": "stdout"
                })
        logger.info(f"Nombre de lignes de logs trouvées : {len(log_lines)}")
        return log_lines
    except docker.errors.NotFound:
        logger.error(f"Conteneur {container_id} non trouvé")
        return []
    except Exception as e:
        logger.error(f"Erreur lors de la récupération des logs du conteneur {container_id}: {e}")
        raise
async def get_container_stats(self, container_id: str) -> ContainerStats:
"""Récupère les statistiques d'un conteneur."""
try:
self.ensure_client()
container = self.client.containers.get(container_id)
stats = container.stats(stream=False)
logger.debug(f"Stats brutes reçues pour {container_id}: {stats}")
# Vérifier la présence des clés nécessaires
if not all(key in stats for key in ['cpu_stats', 'precpu_stats', 'memory_stats']):
logger.error(f"Données de stats incomplètes pour le conteneur {container_id}")
raise ValueError("Données de statistiques incomplètes")
# Calculer le pourcentage CPU avec vérification des valeurs
try:
cpu_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - stats['precpu_stats']['cpu_usage']['total_usage']
system_delta = stats['cpu_stats']['system_cpu_usage'] - stats['precpu_stats']['system_cpu_usage']
online_cpus = stats['cpu_stats'].get('online_cpus', 1) # Utiliser 1 comme valeur par défaut
if system_delta > 0:
cpu_percent = (cpu_delta / system_delta) * 100.0 * online_cpus
else:
cpu_percent = 0.0
except (KeyError, TypeError) as e:
logger.warning(f"Erreur lors du calcul du CPU pour {container_id}: {e}")
cpu_percent = 0.0
# Convertir les statistiques réseau avec gestion des erreurs
networks = {}
for net_name, net_stats in stats.get('networks', {}).items():
try:
networks[net_name] = NetworkStats(
rx_bytes=net_stats.get('rx_bytes', 0),
rx_packets=net_stats.get('rx_packets', 0),
rx_errors=net_stats.get('rx_errors', 0),
rx_dropped=net_stats.get('rx_dropped', 0),
tx_bytes=net_stats.get('tx_bytes', 0),
tx_packets=net_stats.get('tx_packets', 0),
tx_errors=net_stats.get('tx_errors', 0),
tx_dropped=net_stats.get('tx_dropped', 0)
)
except Exception as e:
logger.warning(f"Erreur lors de la conversion des stats réseau pour {net_name}: {e}")
continue
# Récupérer les statistiques de bloc avec gestion des erreurs
try:
io_stats = stats.get('blkio_stats', {}).get('io_service_bytes_recursive', [])
block_read = io_stats[0].get('value', 0) if len(io_stats) > 0 else 0
block_write = io_stats[1].get('value', 0) if len(io_stats) > 1 else 0
except (IndexError, KeyError, AttributeError) as e:
logger.warning(f"Erreur lors de la récupération des stats de bloc pour {container_id}: {e}")
block_read = 0
block_write = 0
return ContainerStats(
cpu_percent=cpu_percent,
memory_usage=stats.get('memory_stats', {}).get('usage', 0),
memory_limit=stats.get('memory_stats', {}).get('limit', 0),
network=networks,
block_read=block_read,
block_write=block_write
)
except docker.errors.NotFound:
logger.error(f"Conteneur {container_id} non trouvé")
raise
except Exception as e:
logger.error(f"Erreur lors de la récupération des stats du conteneur {container_id}: {e}")
raise
async def start_container(self, container_id: str) -> None:
"""Démarre un conteneur."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.start()
async def stop_container(self, container_id: str) -> None:
"""Arrête un conteneur."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.stop()
async def restart_container(self, container_id: str) -> None:
"""Redémarre un conteneur."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.restart()
    async def update_container(self, container_id: str) -> None:
        """Update a container.

        NOTE(review): despite its name, this currently only *restarts* the
        container — it does not pull a newer image or recreate the container.
        TODO confirm whether a real image update is intended here.
        """
        self.ensure_client()
        container = self.client.containers.get(container_id)
        container.restart()
async def follow_container_logs(self, container_id: str) -> AsyncGenerator[Dict[str, str], None]:
"""Suit les logs d'un conteneur en temps réel en utilisant l'API Docker Python."""
try:
self.ensure_client()
container = self.client.containers.get(container_id)
logger.info(f"Démarrage du suivi des logs pour le conteneur {container_id}")
# Utiliser l'API Docker Python pour obtenir un flux de logs continu
log_stream = container.logs(
stream=True,
follow=True,
timestamps=True,
stdout=True,
stderr=True,
tail=100 # Commencer avec les 100 derniers logs
)
for log in log_stream:
try:
# Décoder la ligne de log
line = log.decode('utf-8').strip()
if not line:
continue
logger.debug(f"Ligne de log brute reçue: {line}")
# Format attendu: "2024-04-01T11:10:27.000000000Z message..."
try:
# Trouver le premier espace qui sépare la date du message
space_index = line.find(' ')
if space_index > 0:
timestamp = line[:space_index]
message = line[space_index + 1:].strip()
logger.debug(f"Timestamp extrait: {timestamp}")
logger.debug(f"Message extrait: {message}")
# Vérifier si le timestamp est valide (format ISO 8601)
if timestamp.endswith('Z'):
# Convertir le timestamp en format ISO 8601 standard
timestamp = timestamp.replace('Z', '+00:00')
# Vérifier que le format est valide
if not timestamp.replace('.', '').replace(':', '').replace('-', '').replace('T', '').replace('+', '').isdigit():
logger.warning(f"Format de timestamp invalide: {timestamp}")
timestamp = ""
else:
logger.warning(f"Timestamp ne se termine pas par Z: {timestamp}")
timestamp = ""
yield {
"timestamp": timestamp,
"message": message,
"stream": "stdout"
}
else:
# Si pas de timestamp valide, envoyer le message sans timestamp
logger.warning(f"Pas d'espace trouvé dans la ligne: {line}")
yield {
"timestamp": "",
"message": line,
"stream": "stdout"
}
except ValueError as e:
logger.error(f"Erreur lors du parsing de la ligne: {e}")
yield {
"timestamp": "",
"message": line,
"stream": "stdout"
}
except UnicodeDecodeError:
# Fallback sur latin-1 si UTF-8 échoue
line = log.decode('latin-1').strip()
yield {
"timestamp": "",
"message": line,
"stream": "stdout"
}
except Exception as e:
logger.error(f"Erreur lors du décodage d'un log : {e}")
continue
except docker.errors.NotFound:
logger.error(f"Conteneur {container_id} non trouvé")
except Exception as e:
logger.error(f"Erreur lors du suivi des logs du conteneur {container_id}: {e}")
raise
finally:
logger.info(f"Arrêt du suivi des logs pour le conteneur {container_id}")
if 'log_stream' in locals():
try:
log_stream.close()
except:
pass

View file

@ -0,0 +1,77 @@
import docker
from typing import List, Dict, Any
from datetime import datetime
from ..models.container import Container, ContainerLog, ContainerStats
class DockerService:
def __init__(self):
self.client = docker.from_env()
async def list_containers(self) -> List[Container]:
"""Liste tous les conteneurs Docker."""
containers = self.client.containers.list(all=True)
return [
Container(
id=container.id,
name=container.name,
image=container.image.tags[0] if container.image.tags else container.image.id,
status=container.status,
ports=container.ports,
created_at=datetime.fromtimestamp(container.attrs['Created']),
updated_at=datetime.now()
)
for container in containers
]
async def get_container_logs(self, container_id: str) -> List[ContainerLog]:
"""Récupère les logs d'un conteneur."""
container = self.client.containers.get(container_id)
logs = container.logs(tail=100, timestamps=True).decode('utf-8').split('\n')
return [
ContainerLog(
timestamp=datetime.fromisoformat(line.split(' ')[0].replace('T', ' ').replace('Z', '')),
stream=line.split(' ')[1].strip('[]'),
message=' '.join(line.split(' ')[2:])
)
for line in logs if line
]
async def update_container(self, container_id: str) -> bool:
"""Met à jour un conteneur avec la dernière version de son image."""
try:
container = self.client.containers.get(container_id)
image = container.image
container.stop()
container.remove()
new_container = self.client.containers.run(
image=image.id,
name=container.name,
ports=container.ports,
environment=container.attrs['Config']['Env'],
restart_policy={"Name": container.attrs['HostConfig']['RestartPolicy']['Name']}
)
return True
except Exception as e:
print(f"Erreur lors de la mise à jour du conteneur : {e}")
return False
async def get_container_stats(self, container_id: str) -> ContainerStats:
"""Récupère les statistiques d'un conteneur."""
container = self.client.containers.get(container_id)
stats = container.stats(stream=False)
# Calcul du pourcentage CPU
cpu_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - stats['precpu_stats']['cpu_usage']['total_usage']
system_delta = stats['cpu_stats']['system_cpu_usage'] - stats['precpu_stats']['system_cpu_usage']
cpu_percent = (cpu_delta / system_delta) * 100.0 if system_delta > 0 else 0.0
return ContainerStats(
cpu_percent=cpu_percent,
memory_usage=stats['memory_stats']['usage'],
memory_limit=stats['memory_stats']['limit'],
network_rx=stats['networks']['eth0']['rx_bytes'],
network_tx=stats['networks']['eth0']['tx_bytes'],
block_read=stats['blkio_stats']['io_service_bytes_recursive'][0]['value'],
block_write=stats['blkio_stats']['io_service_bytes_recursive'][1]['value'],
timestamp=datetime.now()
)

View file

@ -0,0 +1,58 @@
from typing import AsyncGenerator, Dict, List
import docker
import json
from datetime import datetime
from fastapi import WebSocket
from app.core.config import settings
class LogsService:
    """Manage WebSocket subscribers and stream Docker container logs to them."""

    def __init__(self):
        self.client = docker.from_env()
        # Maps a container id (or None for the global channel) to the list
        # of WebSocket clients subscribed to it.
        self.active_connections: Dict[str, List[WebSocket]] = {}

    async def get_container_logs(self, container_id: str, follow: bool = True) -> AsyncGenerator[str, None]:
        """Yield a container's log lines as JSON strings, in real time.

        Each entry carries: timestamp (ISO string), container_id,
        container_name, message and stream. Errors are yielded as
        ``{"error": ...}`` JSON objects instead of raising.
        """
        try:
            container = self.client.containers.get(container_id)
            for log in container.logs(stream=True, follow=follow, timestamps=True):
                # FIX: the raw log is bytes — decode it exactly once. The old
                # code called .decode() on an already-decoded str and split a
                # str with a bytes separator, both of which raise.
                line = log.decode('utf-8', errors='replace').strip()
                if not line:
                    continue
                # Format: "<ISO timestamp> <message>".
                timestamp, _, message = line.partition(' ')
                log_entry = {
                    # Keep the timestamp as a string: datetime objects are not
                    # JSON-serializable, so json.dumps crashed before.
                    "timestamp": timestamp,
                    "container_id": container_id,
                    "container_name": container.name,
                    "message": message,
                    # Docker does not tag lines with their stream; assume stdout.
                    "stream": "stdout"
                }
                yield json.dumps(log_entry)
        except docker.errors.NotFound:
            yield json.dumps({
                "error": f"Conteneur {container_id} non trouvé"
            })
        except Exception as e:
            yield json.dumps({
                "error": str(e)
            })

    async def connect(self, websocket: WebSocket, container_id: str = None):
        """Accept a WebSocket client and register it for log delivery."""
        await websocket.accept()
        if container_id not in self.active_connections:
            self.active_connections[container_id] = []
        self.active_connections[container_id].append(websocket)

    def disconnect(self, websocket: WebSocket, container_id: str = None):
        """Unregister a WebSocket client, dropping empty channels."""
        if container_id in self.active_connections:
            self.active_connections[container_id].remove(websocket)
            if not self.active_connections[container_id]:
                del self.active_connections[container_id]

    async def broadcast_log(self, log: str, container_id: str = None):
        """Send a log line to every client subscribed to ``container_id``."""
        if container_id in self.active_connections:
            # FIX: iterate over a snapshot — disconnect() mutates the list,
            # and removing while iterating silently skips subscribers.
            for connection in list(self.active_connections[container_id]):
                try:
                    await connection.send_text(log)
                except Exception:
                    # Drop dead connections instead of failing the broadcast.
                    self.disconnect(connection, container_id)


logs_service = LogsService()

View file

@ -0,0 +1,33 @@
import redis
import json
from typing import List, Dict, Any
from datetime import datetime
from ..models.container import ContainerLog
class RedisService:
    """Persist and query container logs stored as Redis lists."""

    def __init__(self):
        # Default client returns *bytes* responses; decode where needed.
        self.redis = redis.Redis(host='localhost', port=6379, db=0)
        self.logs_key_prefix = "container_logs:"

    async def get_logs(self, container_id: str, limit: int = 100) -> List[ContainerLog]:
        """Return up to ``limit`` most recent logs for a container."""
        key = f"{self.logs_key_prefix}{container_id}"
        logs = self.redis.lrange(key, 0, limit - 1)
        # json.loads accepts bytes directly, so no decode is needed here.
        return [ContainerLog(**json.loads(log)) for log in logs]

    async def add_log(self, container_id: str, log: ContainerLog) -> None:
        """Push a log entry for a container into Redis."""
        key = f"{self.logs_key_prefix}{container_id}"
        self.redis.lpush(key, log.model_dump_json())
        # Keep only the 1000 most recent entries.
        self.redis.ltrim(key, 0, 999)

    async def clear_logs(self, container_id: str) -> None:
        """Delete every stored log for a container."""
        key = f"{self.logs_key_prefix}{container_id}"
        self.redis.delete(key)

    async def get_container_ids(self) -> List[str]:
        """List the container ids that currently have stored logs."""
        keys = self.redis.keys(f"{self.logs_key_prefix}*")
        # FIX: keys() yields bytes by default; calling str.replace on bytes
        # raised TypeError. Decode defensively in case decode_responses is on.
        return [
            (key.decode('utf-8') if isinstance(key, bytes) else key).replace(self.logs_key_prefix, "")
            for key in keys
        ]

14
server/requirements.txt Normal file
View file

@ -0,0 +1,14 @@
fastapi>=0.109.0
uvicorn>=0.27.0
python-jose[cryptography]>=3.3.0
passlib[bcrypt]>=1.7.4
python-multipart>=0.0.6
docker>=6.1.3
pydantic>=2.6.0
python-dotenv>=1.0.0
redis>=5.0.1
sqlalchemy>=2.0.23
websockets>=12.0
aiohttp>=3.9.1
pytest>=7.4.3
httpx>=0.25.2