"""Background backup-job executor for the YunoHost backup scheduler.

Runs a single Job (YunoHost app/system backup, database dump or custom
directory archive), records the outcome in a Run row, applies retention,
optionally transfers the archive to a configured destination, and sends
a best-effort notification.
"""

import json
import os
import subprocess
import traceback
from datetime import datetime

from db import db, Job, Run

# Kept for backward compatibility with importers; the actual directory is
# read from app.config["YUNOHOST_BACKUP_DIR"] inside execute_job().
BACKUP_DIR = None


def execute_job(job_id):
    """Entry point called by APScheduler (inside a Flask app_context).

    Loads the Job, creates a Run row with status "running", dispatches to
    the type-specific backup implementation, then records success/error,
    applies retention, triggers the optional remote transfer and finally
    a notification. Returns silently if the job is missing or disabled.
    """
    from flask import current_app

    backup_dir = current_app.config["YUNOHOST_BACKUP_DIR"]
    instance = current_app.config["INSTANCE_NAME"]

    job = db.session.get(Job, job_id)
    if not job or not job.enabled:
        return

    run = Run(job_id=job_id, started_at=datetime.utcnow(), status="running")
    db.session.add(run)
    db.session.commit()

    try:
        # Dispatch on job type; each branch returns (archive_name, log).
        if job.type == "ynh_app":
            archive_name, log = _run_ynh_app(job, instance, backup_dir)
        elif job.type == "ynh_system":
            archive_name, log = _run_ynh_system(job, instance, backup_dir)
        elif job.type in ("mysql", "postgresql"):
            from jobs.db_dump import run_db_dump
            archive_name, log = run_db_dump(job, instance, backup_dir)
        elif job.type == "custom_dir":
            from jobs.custom_dir import backup_custom_dir
            archive_name, log = backup_custom_dir(job, instance, backup_dir)
        else:
            raise ValueError(f"Type de job non géré : {job.type}")

        archive_path = os.path.join(backup_dir, archive_name + ".tar")
        from jobs.utils import sudo_getsize
        # Normalize a falsy size (0 / None on failure) to None.
        size_bytes = sudo_getsize(archive_path) or None

        run.status = "success"
        run.archive_name = archive_name
        run.size_bytes = size_bytes
        run.log_text = log

        from retention import apply_retention
        deleted = apply_retention(job, archive_name, backup_dir)
        if deleted:
            run.log_text += f"\n\nRétention : {len(deleted)} archive(s) supprimée(s) : {', '.join(deleted)}"

        # Automatic transfer to the configured destination. Best-effort:
        # a transfer failure is appended to the log but does not flip the
        # run status to error (the backup itself succeeded).
        if job.destination_id:
            from db import Destination
            dest = db.session.get(Destination, job.destination_id)
            if dest and dest.enabled:
                data_dir = current_app.config["DATA_DIR"]
                try:
                    from jobs.transfer import transfer_archive
                    transfer_log = transfer_archive(archive_name, dest, backup_dir, data_dir)
                    run.log_text += f"\n\nTransfert → {dest.remote_str} :\n{transfer_log}"
                except Exception as transfer_exc:
                    run.log_text += f"\n\n⚠ Transfert échoué vers {dest.remote_str} :\n{transfer_exc}"
    except Exception as exc:
        run.status = "error"
        # FIX: keep the full traceback in the run log — str(exc) alone
        # lost the failure location, making errors hard to diagnose.
        run.log_text = f"{exc}\n\n{traceback.format_exc()}"
    finally:
        run.finished_at = datetime.utcnow()
        db.session.commit()

    # Best-effort notification; never let it crash the scheduler thread.
    try:
        from notifications import send_job_notification
        send_job_notification(run, job)
    except Exception:
        pass


def _archive_name(instance, label):
    """Build the date-stamped archive base name: <instance>_<label>_<YYYYMMDD>."""
    date_str = datetime.utcnow().strftime("%Y%m%d")
    return f"{instance}_{label}_{date_str}"


def _run_ynh_app(job, instance, backup_dir):
    """Back up one YunoHost app via `yunohost backup create --apps`.

    Reads app_id / core_only from the job's JSON config (core_only falls
    back to the job attribute). Returns (archive_name, log).
    Raises RuntimeError if the archive already exists or the command fails.
    """
    cfg = json.loads(job.config_json or "{}")
    app_id = cfg.get("app_id", "")
    core_only = cfg.get("core_only", job.core_only)

    archive = _archive_name(instance, app_id)
    _abort_if_exists(archive, backup_dir)

    cmd = ["sudo", "yunohost", "backup", "create", "--apps", app_id, "--name", archive]
    if core_only:
        # The variable must be visible to the root process, hence
        # `sudo env BACKUP_CORE_ONLY=1 yunohost ...`.
        cmd = ["sudo", "env", "BACKUP_CORE_ONLY=1"] + cmd[1:]

    result = subprocess.run(cmd, capture_output=True, text=True, timeout=3600)
    log = (result.stdout + result.stderr).strip()
    if result.returncode != 0:
        raise RuntimeError(f"yunohost backup create a échoué (code {result.returncode}) :\n{log}")
    return archive, log


def _run_ynh_system(job, instance, backup_dir):
    """Back up the whole YunoHost system via `yunohost backup create --system`.

    Returns (archive_name, log). Raises RuntimeError if the archive
    already exists or the command fails.
    """
    archive = _archive_name(instance, "system")
    _abort_if_exists(archive, backup_dir)

    cmd = ["sudo", "yunohost", "backup", "create", "--system", "--name", archive]
    result = subprocess.run(cmd, capture_output=True, text=True, timeout=3600)
    log = (result.stdout + result.stderr).strip()
    if result.returncode != 0:
        raise RuntimeError(f"yunohost backup create a échoué (code {result.returncode}) :\n{log}")
    return archive, log


def _abort_if_exists(archive_name, backup_dir):
    """Raise RuntimeError if <backup_dir>/<archive_name>.tar already exists.

    Prevents `yunohost backup create` from failing mid-run on a name
    collision (e.g. two runs of the same job on the same day).
    """
    path = os.path.join(backup_dir, archive_name + ".tar")
    if os.path.exists(path):
        raise RuntimeError(
            f"L'archive {archive_name}.tar existe déjà. "
            "Supprimez-la manuellement ou attendez le prochain cycle."
        )