commit d05799fe65
el, 2025-04-01 16:33:54 +02:00
60 changed files with 7078 additions and 0 deletions

@@ -0,0 +1,74 @@
import uuid
from typing import List, Optional, Dict
from datetime import datetime, timedelta
from ..models import Agent, AgentRegistration
from .docker import DockerService
class AgentService:
def __init__(self):
self.agents: Dict[str, Agent] = {}
self.docker_service = DockerService()
    async def register_agent(self, registration: AgentRegistration) -> Agent:
        """Register a new agent."""
agent_id = str(uuid.uuid4())
agent = Agent(
id=agent_id,
name=registration.name,
hostname=registration.hostname,
ip_address=registration.ip_address,
docker_version=registration.docker_version,
status="active",
last_seen=datetime.utcnow(),
containers=[]
)
self.agents[agent_id] = agent
return agent
    async def update_agent_status(self, agent_id: str) -> Optional[Agent]:
        """Update an agent's status."""
if agent_id not in self.agents:
return None
agent = self.agents[agent_id]
agent.last_seen = datetime.utcnow()
agent.status = "active"
return agent
    async def get_agent(self, agent_id: str) -> Optional[Agent]:
        """Return an agent by its ID."""
return self.agents.get(agent_id)
    async def list_agents(self) -> List[Agent]:
        """List all registered agents."""
return list(self.agents.values())
    async def remove_agent(self, agent_id: str) -> bool:
        """Remove an agent."""
if agent_id in self.agents:
del self.agents[agent_id]
return True
return False
    async def cleanup_inactive_agents(self, timeout_minutes: int = 5) -> List[str]:
        """Remove agents that have not been seen within the timeout."""
now = datetime.utcnow()
inactive_agents = [
agent_id for agent_id, agent in self.agents.items()
if (now - agent.last_seen) > timedelta(minutes=timeout_minutes)
]
for agent_id in inactive_agents:
await self.remove_agent(agent_id)
return inactive_agents
    async def update_agent_containers(self, agent_id: str) -> Optional[Agent]:
        """Refresh an agent's container list."""
if agent_id not in self.agents:
return None
agent = self.agents[agent_id]
agent.containers = await self.docker_service.list_containers()
agent.last_seen = datetime.utcnow()
return agent
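
A rough usage sketch for the service above (not part of this commit): a background task that periodically reaps agents whose heartbeat has gone stale. The import path, the interval and launching it from an asyncio task are assumptions.

import asyncio

from app.services.agent import AgentService  # module path assumed from the relative imports above

agent_service = AgentService()

async def reap_stale_agents(interval_seconds: int = 60) -> None:
    """Periodically drop agents that have not checked in within the timeout (illustrative only)."""
    while True:
        removed = await agent_service.cleanup_inactive_agents(timeout_minutes=5)
        if removed:
            print(f"Removed stale agents: {removed}")
        await asyncio.sleep(interval_seconds)

# Typically started from an application startup hook:
# asyncio.create_task(reap_stale_agents())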

@@ -0,0 +1,75 @@
import uuid
from typing import List, Optional
from datetime import datetime
from ..models.agent import Agent, AgentCreate, AgentUpdate, AgentStatus
class AgentService:
def __init__(self):
        # TODO: Replace with a real database
self.agents: List[Agent] = []
    async def list_agents(self) -> List[Agent]:
        """List all agents."""
return self.agents
    async def create_agent(self, agent: AgentCreate) -> Agent:
        """Create a new agent."""
new_agent = Agent(
id=str(uuid.uuid4()),
name=agent.name,
host=agent.host,
port=agent.port,
token=agent.token,
status="offline",
version="1.0.0",
last_seen=datetime.now(),
created_at=datetime.now(),
updated_at=datetime.now()
)
self.agents.append(new_agent)
return new_agent
    async def get_agent(self, agent_id: str) -> Optional[Agent]:
        """Return an agent by its ID."""
return next((agent for agent in self.agents if agent.id == agent_id), None)
    async def update_agent(self, agent_id: str, agent_update: AgentUpdate) -> Optional[Agent]:
        """Update an agent."""
agent = await self.get_agent(agent_id)
if not agent:
return None
update_data = agent_update.dict(exclude_unset=True)
for key, value in update_data.items():
setattr(agent, key, value)
agent.updated_at = datetime.now()
return agent
    async def delete_agent(self, agent_id: str) -> bool:
        """Delete an agent."""
agent = await self.get_agent(agent_id)
if not agent:
return False
self.agents.remove(agent)
return True
    async def get_agent_status(self, agent_id: str) -> Optional[AgentStatus]:
        """Return an agent's status."""
agent = await self.get_agent(agent_id)
if not agent:
return None
        # TODO: Implement a real status check
return AgentStatus(
status=agent.status,
version=agent.version,
last_seen=agent.last_seen,
containers_count=0,
system_info={
"os": "Linux",
"arch": "x86_64",
"docker_version": "20.10.0"
}
)
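
A minimal sketch of how this CRUD service could be exposed over HTTP with FastAPI, which the project already uses elsewhere in this commit. The router prefix, import paths and response models are assumptions, not part of the commit.

from fastapi import APIRouter, HTTPException

from app.models.agent import Agent, AgentCreate  # paths assumed
from app.services.agent import AgentService

router = APIRouter(prefix="/agents", tags=["agents"])
service = AgentService()

@router.get("/", response_model=list[Agent])
async def list_agents():
    return await service.list_agents()

@router.post("/", response_model=Agent)
async def create_agent(payload: AgentCreate):
    return await service.create_agent(payload)

@router.get("/{agent_id}", response_model=Agent)
async def get_agent(agent_id: str):
    agent = await service.get_agent(agent_id)
    if agent is None:
        raise HTTPException(status_code=404, detail="Agent not found")
    return agent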

@@ -0,0 +1,387 @@
import docker
from typing import List, Dict, Any, Optional, AsyncGenerator
import asyncio
import logging
from ..models.container import Container, ContainerStats, NetworkStats, PortBinding
logger = logging.getLogger(__name__)
class DockerService:
    def __init__(self):
        """Initialize the Docker service."""
self.client = None
self.init_client()
    def init_client(self):
        """Initialize or reinitialize the Docker client."""
        try:
            if self.client:
                try:
                    # Check whether the existing client is still usable
                    self.client.ping()
                    logger.info("Existing Docker client is still valid")
                    return
                except Exception:
                    logger.warning("Existing Docker client is no longer valid")
                    self.client = None
            self.client = docker.from_env()
            self.client.ping()  # verify the connection actually works
            logger.info("New Docker client initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize the Docker client: {e}")
            raise
    def ensure_client(self):
        """Ensure the Docker client is initialized and valid."""
        try:
            if not self.client:
                logger.warning("Docker client not initialized, attempting to reinitialize")
                self.init_client()
                return
            # Quick check that the client is functional
            self.client.ping()
        except Exception as e:
            logger.warning(f"Docker client invalid, attempting to reinitialize: {e}")
            self.init_client()
    def _parse_port_bindings(self, ports: Dict) -> List[PortBinding]:
        """Convert Docker port mappings into PortBinding models."""
bindings = []
if not ports:
return bindings
for container_port, host_bindings in ports.items():
if not host_bindings:
continue
# Format: "8080/tcp"
port, proto = container_port.split("/")
for binding in host_bindings:
bindings.append(PortBinding(
host_ip=binding.get("HostIp", "0.0.0.0"),
host_port=int(binding.get("HostPort", port)),
container_port=int(port),
protocol=proto
))
return bindings
    def _parse_environment(self, env: List[str]) -> Dict[str, str]:
        """Convert environment variables into a dictionary."""
result = {}
if not env:
return result
for var in env:
if "=" in var:
key, value = var.split("=", 1)
result[key] = value
return result
    async def list_containers(self) -> List[Container]:
        """List all Docker containers."""
        try:
            self.ensure_client()
            logger.info("Fetching the container list")
            containers = self.client.containers.list(all=True)
            logger.info(f"Found {len(containers)} containers")
result = []
for container in containers:
try:
attrs = container.attrs
container_model = Container(
id=container.id,
name=container.name,
status=container.status,
image=container.image.tags[0] if container.image.tags else "none",
created=attrs['Created'],
ports=self._parse_port_bindings(attrs['NetworkSettings']['Ports']),
volumes=[v['Source'] for v in attrs['Mounts']],
environment=self._parse_environment(attrs['Config']['Env']),
networks=list(attrs['NetworkSettings']['Networks'].keys()),
health_status=attrs.get('State', {}).get('Health', {}).get('Status'),
restart_policy=attrs['HostConfig']['RestartPolicy']['Name'],
command=attrs['Config']['Cmd'][0] if attrs['Config']['Cmd'] else None
)
result.append(container_model)
                    logger.debug(f"Container added: {container.name}")
                except Exception as e:
                    logger.error(f"Failed to convert container {container.name}: {e}")
continue
return result
except Exception as e:
            logger.error(f"Failed to list containers: {e}")
raise
    async def get_container(self, container_id: str) -> Container:
        """Return information about a specific container."""
self.ensure_client()
container = self.client.containers.get(container_id)
attrs = container.attrs
return Container(
id=container.id,
name=container.name,
status=container.status,
image=container.image.tags[0] if container.image.tags else "none",
created=attrs['Created'],
ports=self._parse_port_bindings(attrs['NetworkSettings']['Ports']),
volumes=[v['Source'] for v in attrs['Mounts']],
environment=self._parse_environment(attrs['Config']['Env']),
networks=list(attrs['NetworkSettings']['Networks'].keys()),
health_status=attrs.get('State', {}).get('Health', {}).get('Status'),
restart_policy=attrs['HostConfig']['RestartPolicy']['Name'],
command=attrs['Config']['Cmd'][0] if attrs['Config']['Cmd'] else None
)
    async def get_container_logs(self, container_id: str, tail: int = 100) -> List[Dict[str, str]]:
        """Return a container's logs."""
try:
self.ensure_client()
container = self.client.containers.get(container_id)
            if not container:
                logger.error(f"Container {container_id} not found")
                return []
            logger.info(f"Fetching logs for container {container_id}")
            # Use the Docker API to fetch the logs
logs = container.logs(
tail=tail,
timestamps=True,
stdout=True,
stderr=True
)
if not logs:
                logger.warning(f"No logs found for container {container_id}")
return []
decoded_logs = logs.decode('utf-8')
log_lines = []
for line in decoded_logs.split('\n'):
if not line.strip():
continue
                # Expected format: "2024-04-01T11:10:27.000000000Z message..."
try:
timestamp, message = line.split(' ', 1)
                    log_lines.append({
                        "timestamp": timestamp,
                        "message": message.strip(),
                        "stream": "stdout"  # default to stdout
})
except ValueError:
                    # If the format is not as expected, keep the whole line
log_lines.append({
"timestamp": "",
"message": line.strip(),
"stream": "stdout"
})
            logger.info(f"Found {len(log_lines)} log lines")
return log_lines
except docker.errors.NotFound:
            logger.error(f"Container {container_id} not found")
            return []
        except Exception as e:
            logger.error(f"Failed to fetch logs for container {container_id}: {e}")
raise
    async def get_container_stats(self, container_id: str) -> ContainerStats:
        """Return a container's statistics."""
try:
self.ensure_client()
container = self.client.containers.get(container_id)
stats = container.stats(stream=False)
            logger.debug(f"Raw stats received for {container_id}: {stats}")
            # Check that the required keys are present
            if not all(key in stats for key in ['cpu_stats', 'precpu_stats', 'memory_stats']):
                logger.error(f"Incomplete stats data for container {container_id}")
                raise ValueError("Incomplete statistics data")
            # Compute the CPU percentage, guarding against missing values
try:
cpu_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - stats['precpu_stats']['cpu_usage']['total_usage']
system_delta = stats['cpu_stats']['system_cpu_usage'] - stats['precpu_stats']['system_cpu_usage']
                online_cpus = stats['cpu_stats'].get('online_cpus', 1)  # default to 1 CPU if not reported
if system_delta > 0:
cpu_percent = (cpu_delta / system_delta) * 100.0 * online_cpus
else:
cpu_percent = 0.0
except (KeyError, TypeError) as e:
                logger.warning(f"Failed to compute CPU usage for {container_id}: {e}")
                cpu_percent = 0.0
            # Convert the network statistics, tolerating missing fields
networks = {}
for net_name, net_stats in stats.get('networks', {}).items():
try:
networks[net_name] = NetworkStats(
rx_bytes=net_stats.get('rx_bytes', 0),
rx_packets=net_stats.get('rx_packets', 0),
rx_errors=net_stats.get('rx_errors', 0),
rx_dropped=net_stats.get('rx_dropped', 0),
tx_bytes=net_stats.get('tx_bytes', 0),
tx_packets=net_stats.get('tx_packets', 0),
tx_errors=net_stats.get('tx_errors', 0),
tx_dropped=net_stats.get('tx_dropped', 0)
)
except Exception as e:
                    logger.warning(f"Failed to convert network stats for {net_name}: {e}")
                    continue
            # Read block I/O statistics, tolerating missing or malformed data
try:
io_stats = stats.get('blkio_stats', {}).get('io_service_bytes_recursive', [])
block_read = io_stats[0].get('value', 0) if len(io_stats) > 0 else 0
block_write = io_stats[1].get('value', 0) if len(io_stats) > 1 else 0
except (IndexError, KeyError, AttributeError) as e:
                logger.warning(f"Failed to read block I/O stats for {container_id}: {e}")
block_read = 0
block_write = 0
return ContainerStats(
cpu_percent=cpu_percent,
memory_usage=stats.get('memory_stats', {}).get('usage', 0),
memory_limit=stats.get('memory_stats', {}).get('limit', 0),
network=networks,
block_read=block_read,
block_write=block_write
)
except docker.errors.NotFound:
            logger.error(f"Container {container_id} not found")
            raise
        except Exception as e:
            logger.error(f"Failed to fetch stats for container {container_id}: {e}")
raise
    async def start_container(self, container_id: str) -> None:
        """Start a container."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.start()
    async def stop_container(self, container_id: str) -> None:
        """Stop a container."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.stop()
    async def restart_container(self, container_id: str) -> None:
        """Restart a container."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.restart()
    async def update_container(self, container_id: str) -> None:
        """Update a container (the current implementation simply restarts it)."""
self.ensure_client()
container = self.client.containers.get(container_id)
container.restart()
    async def follow_container_logs(self, container_id: str) -> AsyncGenerator[Dict[str, str], None]:
        """Stream a container's logs in real time using the Docker Python API."""
try:
self.ensure_client()
container = self.client.containers.get(container_id)
            logger.info(f"Starting log streaming for container {container_id}")
            # Use the Docker Python API to get a continuous log stream
log_stream = container.logs(
stream=True,
follow=True,
timestamps=True,
stdout=True,
stderr=True,
                tail=100  # start with the last 100 log lines
)
for log in log_stream:
try:
                    # Decode the log line
                    line = log.decode('utf-8').strip()
                    if not line:
                        continue
                    logger.debug(f"Raw log line received: {line}")
                    # Expected format: "2024-04-01T11:10:27.000000000Z message..."
                    try:
                        # Find the first space separating the timestamp from the message
space_index = line.find(' ')
if space_index > 0:
timestamp = line[:space_index]
message = line[space_index + 1:].strip()
                            logger.debug(f"Extracted timestamp: {timestamp}")
                            logger.debug(f"Extracted message: {message}")
                            # Check that the timestamp is valid (ISO 8601)
                            if timestamp.endswith('Z'):
                                # Convert the timestamp to standard ISO 8601
                                timestamp = timestamp.replace('Z', '+00:00')
                                # Crude validation: after stripping separators, only digits should remain
                                if not timestamp.replace('.', '').replace(':', '').replace('-', '').replace('T', '').replace('+', '').isdigit():
                                    logger.warning(f"Invalid timestamp format: {timestamp}")
                                    timestamp = ""
                            else:
                                logger.warning(f"Timestamp does not end with Z: {timestamp}")
                                timestamp = ""
yield {
"timestamp": timestamp,
"message": message,
"stream": "stdout"
}
else:
                            # No valid timestamp, emit the message without one
                            logger.warning(f"No space found in line: {line}")
yield {
"timestamp": "",
"message": line,
"stream": "stdout"
}
except ValueError as e:
                        logger.error(f"Failed to parse the line: {e}")
yield {
"timestamp": "",
"message": line,
"stream": "stdout"
}
except UnicodeDecodeError:
                    # Fall back to latin-1 if UTF-8 decoding fails
line = log.decode('latin-1').strip()
yield {
"timestamp": "",
"message": line,
"stream": "stdout"
}
except Exception as e:
                    logger.error(f"Failed to decode a log entry: {e}")
continue
except docker.errors.NotFound:
            logger.error(f"Container {container_id} not found")
        except Exception as e:
            logger.error(f"Failed to stream logs for container {container_id}: {e}")
raise
finally:
            logger.info(f"Stopping log streaming for container {container_id}")
if 'log_stream' in locals():
try:
log_stream.close()
                except Exception:
pass
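
One way to consume follow_container_logs: since it is an async generator, a FastAPI WebSocket endpoint can forward each parsed entry to the client as JSON. The route, import path and JSON framing below are assumptions, not part of the commit.

import json

from fastapi import APIRouter, WebSocket, WebSocketDisconnect

from app.services.docker import DockerService  # path assumed

router = APIRouter()
docker_service = DockerService()

@router.websocket("/containers/{container_id}/logs")
async def stream_container_logs(websocket: WebSocket, container_id: str):
    await websocket.accept()
    try:
        async for entry in docker_service.follow_container_logs(container_id):
            # entry is a dict with "timestamp", "message" and "stream" keys
            await websocket.send_text(json.dumps(entry))
    except WebSocketDisconnect:
        pass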

@@ -0,0 +1,77 @@
import docker
from typing import List, Dict, Any
from datetime import datetime
from ..models.container import Container, ContainerLog, ContainerStats
class DockerService:
def __init__(self):
self.client = docker.from_env()
    async def list_containers(self) -> List[Container]:
        """List all Docker containers."""
containers = self.client.containers.list(all=True)
return [
Container(
id=container.id,
name=container.name,
image=container.image.tags[0] if container.image.tags else container.image.id,
status=container.status,
ports=container.ports,
                created_at=datetime.fromisoformat(container.attrs['Created'].split('.')[0]),  # 'Created' is an ISO 8601 string, not a Unix timestamp
updated_at=datetime.now()
)
for container in containers
]
    async def get_container_logs(self, container_id: str) -> List[ContainerLog]:
        """Return a container's logs."""
container = self.client.containers.get(container_id)
logs = container.logs(tail=100, timestamps=True).decode('utf-8').split('\n')
return [
ContainerLog(
                timestamp=datetime.fromisoformat(line.split(' ')[0].split('.')[0]),  # trim nanoseconds, which fromisoformat cannot parse
stream=line.split(' ')[1].strip('[]'),
message=' '.join(line.split(' ')[2:])
)
for line in logs if line
]
    async def update_container(self, container_id: str) -> bool:
        """Recreate a container from its current image (note: the image is not re-pulled here)."""
try:
container = self.client.containers.get(container_id)
image = container.image
container.stop()
container.remove()
new_container = self.client.containers.run(
image=image.id,
name=container.name,
ports=container.ports,
environment=container.attrs['Config']['Env'],
restart_policy={"Name": container.attrs['HostConfig']['RestartPolicy']['Name']}
)
return True
except Exception as e:
            print(f"Failed to update the container: {e}")
return False
    async def get_container_stats(self, container_id: str) -> ContainerStats:
        """Return a container's statistics."""
container = self.client.containers.get(container_id)
stats = container.stats(stream=False)
        # Compute the CPU percentage
cpu_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - stats['precpu_stats']['cpu_usage']['total_usage']
system_delta = stats['cpu_stats']['system_cpu_usage'] - stats['precpu_stats']['system_cpu_usage']
cpu_percent = (cpu_delta / system_delta) * 100.0 if system_delta > 0 else 0.0
return ContainerStats(
cpu_percent=cpu_percent,
memory_usage=stats['memory_stats']['usage'],
memory_limit=stats['memory_stats']['limit'],
network_rx=stats['networks']['eth0']['rx_bytes'],
network_tx=stats['networks']['eth0']['tx_bytes'],
block_read=stats['blkio_stats']['io_service_bytes_recursive'][0]['value'],
block_write=stats['blkio_stats']['io_service_bytes_recursive'][1]['value'],
timestamp=datetime.now()
)

@@ -0,0 +1,58 @@
from typing import AsyncGenerator, Dict, List
import docker
import json
from datetime import datetime
from fastapi import WebSocket
from app.core.config import settings
class LogsService:
def __init__(self):
self.client = docker.from_env()
self.active_connections: Dict[str, List[WebSocket]] = {}
    async def get_container_logs(self, container_id: str, follow: bool = True) -> AsyncGenerator[str, None]:
        """Stream a container's logs in real time."""
try:
container = self.client.containers.get(container_id)
            for log in container.logs(stream=True, follow=follow, timestamps=True):
                # Each entry is bytes in the form b"<ISO 8601 timestamp> <message>"
                decoded = log.decode('utf-8', errors='replace').strip()
                timestamp, _, message = decoded.partition(' ')
                log_entry = {
                    "timestamp": timestamp,
                    "container_id": container_id,
                    "container_name": container.name,
                    "message": message,
                    "stream": "stdout"  # the SDK does not label the stream here
                }
yield json.dumps(log_entry)
except docker.errors.NotFound:
            yield json.dumps({
                "error": f"Container {container_id} not found"
})
except Exception as e:
yield json.dumps({
"error": str(e)
})
    async def connect(self, websocket: WebSocket, container_id: str = None):
        """Connect a WebSocket client to receive logs."""
await websocket.accept()
if container_id not in self.active_connections:
self.active_connections[container_id] = []
self.active_connections[container_id].append(websocket)
    def disconnect(self, websocket: WebSocket, container_id: str = None):
        """Disconnect a WebSocket client."""
if container_id in self.active_connections:
self.active_connections[container_id].remove(websocket)
if not self.active_connections[container_id]:
del self.active_connections[container_id]
    async def broadcast_log(self, log: str, container_id: str = None):
        """Broadcast a log entry to all connected clients."""
if container_id in self.active_connections:
for connection in self.active_connections[container_id]:
try:
await connection.send_text(log)
except Exception:
self.disconnect(connection, container_id)
logs_service = LogsService()
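
A sketch of how the module-level logs_service singleton might be wired into a WebSocket route, combining its connect/disconnect tracking with the streaming generator. The route path and import are assumptions, not part of the commit.

from fastapi import APIRouter, WebSocket, WebSocketDisconnect

from app.services.logs import logs_service  # path assumed

router = APIRouter()

@router.websocket("/ws/logs/{container_id}")
async def container_logs_ws(websocket: WebSocket, container_id: str):
    await logs_service.connect(websocket, container_id)
    try:
        # Each yielded entry is already a JSON string; broadcast it to every client watching this container
        async for entry in logs_service.get_container_logs(container_id, follow=True):
            await logs_service.broadcast_log(entry, container_id)
    except WebSocketDisconnect:
        logs_service.disconnect(websocket, container_id)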

@@ -0,0 +1,33 @@
import redis
import json
from typing import List, Dict, Any
from datetime import datetime
from ..models.container import ContainerLog
class RedisService:
def __init__(self):
        self.redis = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)  # decode_responses so keys and values come back as str, not bytes
self.logs_key_prefix = "container_logs:"
    async def get_logs(self, container_id: str, limit: int = 100) -> List[ContainerLog]:
        """Return a container's logs from Redis."""
key = f"{self.logs_key_prefix}{container_id}"
logs = self.redis.lrange(key, 0, limit - 1)
return [ContainerLog(**json.loads(log)) for log in logs]
    async def add_log(self, container_id: str, log: ContainerLog) -> None:
        """Add a log entry for a container in Redis."""
key = f"{self.logs_key_prefix}{container_id}"
self.redis.lpush(key, log.model_dump_json())
        # Keep only the most recent 1000 log entries
self.redis.ltrim(key, 0, 999)
    async def clear_logs(self, container_id: str) -> None:
        """Delete all logs for a container."""
key = f"{self.logs_key_prefix}{container_id}"
self.redis.delete(key)
    async def get_container_ids(self) -> List[str]:
        """Return the IDs of containers that have logs stored."""
keys = self.redis.keys(f"{self.logs_key_prefix}*")
return [key.replace(self.logs_key_prefix, "") for key in keys]
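
The intended write/read flow for this Redis-backed log store, as a sketch: push an entry with add_log (LPUSH plus LTRIM keeps the newest 1000), then read it back with get_logs. The import paths are assumptions, and the ContainerLog fields mirror how the model is constructed elsewhere in this commit; any additional required fields would need to be supplied.

import asyncio
from datetime import datetime

from app.models.container import ContainerLog  # paths assumed
from app.services.redis import RedisService

async def demo() -> None:
    service = RedisService()
    entry = ContainerLog(
        timestamp=datetime.now(),
        stream="stdout",
        message="hello from the container",
    )
    await service.add_log("abc123", entry)
    recent = await service.get_logs("abc123", limit=10)
    print([log.message for log in recent])

asyncio.run(demo())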