import json
import os
import subprocess
import tarfile
import tempfile
import time
from datetime import datetime
def run_db_dump(job, instance, backup_dir):
    """Common entry point for db_dump jobs; dispatches on job.type."""
    kind = job.type
    if kind == "postgresql":
        return _run_postgresql(job, instance, backup_dir)
    if kind == "mysql":
        return _run_mysql(job, instance, backup_dir)
    raise ValueError(f"Type inconnu pour db_dump : {kind}")
- # ---------------------------------------------------------------------------
- # MySQL
- # ---------------------------------------------------------------------------
def _run_mysql(job, instance, backup_dir):
    """Dump a MySQL database with mysqldump and package it into a tar archive.

    Returns a (archive_name, log_text) tuple; raises ValueError when the job
    config has no database name, RuntimeError when mysqldump fails.
    """
    from flask import current_app

    config = json.loads(job.config_json or "{}")
    database = config.get("database", "")
    if not database:
        raise ValueError("Nom de base de données manquant dans la configuration du job.")

    archive_name = _archive_name(instance, "mysql", database, backup_dir)
    with tempfile.TemporaryDirectory() as workdir:
        sql_file = os.path.join(workdir, f"{database}.sql")
        proc = subprocess.run(
            [
                "sudo", "mysqldump",
                "--single-transaction",
                "--routines",
                "--triggers",
                "--result-file", sql_file,
                database,
            ],
            capture_output=True,
            text=True,
            timeout=7200,
        )
        output = (proc.stdout + proc.stderr).strip()
        if proc.returncode != 0:
            raise RuntimeError(f"mysqldump a échoué (code {proc.returncode}) :\n{output}")
        _write_tar(workdir, sql_file, database, archive_name, backup_dir, job,
                   instance, current_app.config.get("INSTANCE_URL", ""))
        return archive_name, output or "mysqldump terminé sans sortie."
- # ---------------------------------------------------------------------------
- # PostgreSQL
- # ---------------------------------------------------------------------------
def _run_postgresql(job, instance, backup_dir):
    """Dump a PostgreSQL database with pg_dump and package it into a tar archive.

    Returns a (archive_name, log_text) tuple; raises ValueError when the job
    config has no database name, RuntimeError when pg_dump fails.
    """
    from flask import current_app

    config = json.loads(job.config_json or "{}")
    database = config.get("database", "")
    if not database:
        raise ValueError("Nom de base de données manquant dans la configuration du job.")

    archive_name = _archive_name(instance, "postgresql", database, backup_dir)
    with tempfile.TemporaryDirectory() as workdir:
        sql_file = os.path.join(workdir, f"{database}.sql")
        # pg_dump must run as the postgres system user.
        proc = subprocess.run(
            ["sudo", "-u", "postgres", "pg_dump", "--format=plain", database],
            capture_output=True,
            timeout=7200,
        )
        stderr_text = proc.stderr.decode("utf-8", errors="replace").strip()
        if proc.returncode != 0:
            raise RuntimeError(f"pg_dump a échoué (code {proc.returncode}) :\n{stderr_text}")
        # The SQL dump arrives on stdout; persist it for _write_tar.
        with open(sql_file, "wb") as out:
            out.write(proc.stdout)
        _write_tar(workdir, sql_file, database, archive_name, backup_dir, job,
                   instance, current_app.config.get("INSTANCE_URL", ""))
        return archive_name, stderr_text or "pg_dump terminé sans sortie."
# ---------------------------------------------------------------------------
# Shared helpers
# ---------------------------------------------------------------------------
def _archive_name(instance, db_type, dbname, backup_dir):
    """Build a unique archive base name: <instance>_<db_type>_<dbname>_<YYYYMMDD>."""
    from jobs.utils import unique_archive_name

    stamp = datetime.utcnow().strftime("%Y%m%d")
    base = f"{instance}_{db_type}_{dbname}_{stamp}"
    return unique_archive_name(base, backup_dir)
- # ---------------------------------------------------------------------------
- # Restore
- # ---------------------------------------------------------------------------
def restore_db_dump(archive_name, backup_dir):
    """Restore a MySQL or PostgreSQL database from a BackupManager archive."""
    from jobs.utils import sudo_exists

    archive_path = os.path.join(backup_dir, archive_name + ".tar")
    if not sudo_exists(archive_path):
        raise FileNotFoundError(f"Archive introuvable : {archive_path}")

    info = _read_backup_info(archive_path)
    db_type = info.get("type")
    dbname = info.get("database", "")
    if not dbname:
        raise ValueError("Nom de base de données introuvable dans backup_info.json.")

    with tempfile.TemporaryDirectory() as workdir:
        sql_file = os.path.join(workdir, f"{dbname}.sql")
        # Stream the dump out of the tar via sudo (backup_dir is 750 root).
        proc = subprocess.run(
            ["sudo", "tar", "-xOf", archive_path, f"db/{dbname}.sql"],
            capture_output=True,
        )
        if proc.returncode != 0:
            err = proc.stderr.decode("utf-8", errors="replace").strip()
            raise RuntimeError(f"Extraction du dump échouée : {err}")
        with open(sql_file, "wb") as out:
            out.write(proc.stdout)

        if db_type == "mysql":
            return _restore_mysql(dbname, sql_file)
        if db_type == "postgresql":
            return _restore_postgresql(dbname, sql_file)
        raise ValueError(f"Type de base inconnu dans l'archive : {db_type}")
def _restore_mysql(dbname, dump_path):
    """Drop, recreate and reload a MySQL database from *dump_path*.

    Returns a human-readable log string; raises RuntimeError when either the
    recreate statement or the dump replay fails.
    """
    log_lines = []
    # Escape backticks so a crafted database name coming from archive metadata
    # cannot break out of the quoted identifier (SQL injection hardening);
    # harmless for ordinary names. As an argv element below, dbname is safe as-is.
    ident = dbname.replace("`", "``")
    # Drop and cleanly recreate the target database.
    result = subprocess.run(
        ["sudo", "mysql", "-e",
         f"DROP DATABASE IF EXISTS `{ident}`; CREATE DATABASE `{ident}` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"],
        capture_output=True, text=True, timeout=60,
    )
    if result.returncode != 0:
        raise RuntimeError(f"Impossible de recréer la base MySQL '{dbname}' :\n{result.stderr.strip()}")
    log_lines.append(f"Base MySQL '{dbname}' recréée.")
    # Replay the dump by feeding the SQL file to the mysql client on stdin.
    with open(dump_path, "rb") as f:
        result = subprocess.run(
            ["sudo", "mysql", dbname],
            stdin=f,
            capture_output=True,
            timeout=7200,
        )
    log = result.stderr.decode("utf-8", errors="replace").strip()
    if result.returncode != 0:
        raise RuntimeError(f"mysql restore a échoué (code {result.returncode}) :\n{log}")
    log_lines.append(f"Dump restauré dans '{dbname}'.")
    if log:
        log_lines.append(log)
    return "\n".join(log_lines)
def _restore_postgresql(dbname, dump_path):
    """Drop, recreate and reload a PostgreSQL database from *dump_path*.

    Returns a human-readable log string; raises RuntimeError when createdb or
    the dump replay fails. The terminate/dropdb steps are best-effort.
    """
    log_lines = []
    # Escape single quotes so a crafted database name coming from archive
    # metadata cannot break out of the SQL string literal below (SQL injection
    # hardening). As an argv element for dropdb/createdb, dbname is safe as-is.
    literal = dbname.replace("'", "''")
    # Terminate active connections, then drop + recreate (best-effort steps).
    subprocess.run(
        ["sudo", "-u", "postgres", "psql", "-c",
         f"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '{literal}' AND pid <> pg_backend_pid();"],
        capture_output=True, timeout=30,
    )
    subprocess.run(
        ["sudo", "-u", "postgres", "dropdb", "--if-exists", dbname],
        capture_output=True, timeout=60,
    )
    result = subprocess.run(
        ["sudo", "-u", "postgres", "createdb", dbname],
        capture_output=True, text=True, timeout=60,
    )
    if result.returncode != 0:
        raise RuntimeError(f"Impossible de recréer la base PostgreSQL '{dbname}' :\n{result.stderr.strip()}")
    log_lines.append(f"Base PostgreSQL '{dbname}' recréée.")
    # Replay the dump via psql; ON_ERROR_STOP makes any SQL error fatal.
    with open(dump_path, "rb") as f:
        result = subprocess.run(
            ["sudo", "-u", "postgres", "psql", "-d", dbname, "-v", "ON_ERROR_STOP=1"],
            stdin=f,
            capture_output=True,
            timeout=7200,
        )
    log = result.stderr.decode("utf-8", errors="replace").strip()
    if result.returncode != 0:
        raise RuntimeError(f"psql restore a échoué (code {result.returncode}) :\n{log}")
    log_lines.append(f"Dump restauré dans '{dbname}'.")
    if log:
        log_lines.append(log)
    return "\n".join(log_lines)
def _read_backup_info(archive_path):
    """Return the parsed backup_info.json embedded in *archive_path* (read via sudo)."""
    from jobs.utils import sudo_read_backup_info

    info = sudo_read_backup_info(archive_path)
    return info
def _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job, instance, instance_url):
    """Create <archive_name>.tar in tmpdir then copy it into backup_dir via sudo rsync.

    The tar contains the SQL dump under db/<dbname>.sql plus a backup_info.json
    manifest; a YunoHost-style .info.json is also written next to the archive.
    Raises RuntimeError when the archive copy fails.
    """
    from jobs.utils import sudo_getsize

    # backup_info.json manifest embedded in the tar. Uses the module-level
    # json import (the previous "import json as _json" shadow was redundant).
    info = {
        "instance_name": instance,
        "instance_url": instance_url,
        "type": job.type,
        "database": dbname,
        "created_at": datetime.utcnow().isoformat(),
        "backupmanager_version": "1.0.0",
    }
    info_path = os.path.join(tmpdir, "backup_info.json")
    with open(info_path, "w") as f:
        json.dump(info, f, indent=2)

    # Build the tar inside tmpdir (writable by the backupmanager user).
    tmp_archive = os.path.join(tmpdir, archive_name + ".tar")
    with tarfile.open(tmp_archive, "w") as tar:
        tar.add(dump_path, arcname=f"db/{dbname}.sql")
        tar.add(info_path, arcname="backup_info.json")

    # Copy into backup_dir via sudo rsync (backup_dir is 750 root).
    archive_path = os.path.join(backup_dir, archive_name + ".tar")
    result = subprocess.run(
        ["sudo", "rsync", tmp_archive, archive_path],
        capture_output=True, text=True,
    )
    if result.returncode != 0:
        raise RuntimeError(f"Copie de l'archive échouée : {result.stderr.strip()}")

    # YunoHost-style .info.json: written to tmpdir then copied via sudo rsync.
    size = sudo_getsize(archive_path)
    ynh_info = {
        "created_at": int(time.time()),
        "description": f"BackupManager: {job.type} {dbname}",
        "size": size,
        "from_before_upgrade": False,
        "apps": {},
        "system": {},
    }
    tmp_ynh_info = os.path.join(tmpdir, archive_name + ".info.json")
    with open(tmp_ynh_info, "w") as f:
        json.dump(ynh_info, f, indent=2)
    # Best-effort copy: a missing .info.json does not invalidate the backup itself.
    subprocess.run(
        ["sudo", "rsync", tmp_ynh_info,
         os.path.join(backup_dir, archive_name + ".info.json")],
        capture_output=True,
    )
|