# db_dump.py — MySQL/PostgreSQL dump jobs for BackupManager.
import json
import os
import subprocess
import tarfile
import tempfile
import time
from datetime import datetime, timezone

from db import db, Job, Run
  9. def run_db_dump(job, instance, backup_dir):
  10. """Point d'entrée commun mysql et postgresql."""
  11. if job.type == "mysql":
  12. return _run_mysql(job, instance, backup_dir)
  13. elif job.type == "postgresql":
  14. return _run_postgresql(job, instance, backup_dir)
  15. raise ValueError(f"Type inconnu pour db_dump : {job.type}")
  16. # ---------------------------------------------------------------------------
  17. # MySQL
  18. # ---------------------------------------------------------------------------
  19. def _run_mysql(job, instance, backup_dir):
  20. from flask import current_app
  21. cfg = json.loads(job.config_json or "{}")
  22. dbname = cfg.get("database", "")
  23. if not dbname:
  24. raise ValueError("Nom de base de données manquant dans la configuration du job.")
  25. archive_name = _archive_name(instance, "mysql", dbname)
  26. _abort_if_exists(archive_name, backup_dir)
  27. with tempfile.TemporaryDirectory() as tmpdir:
  28. dump_path = os.path.join(tmpdir, f"{dbname}.sql")
  29. result = subprocess.run(
  30. [
  31. "sudo", "mysqldump",
  32. "--single-transaction",
  33. "--routines",
  34. "--triggers",
  35. "--result-file", dump_path,
  36. dbname,
  37. ],
  38. capture_output=True,
  39. text=True,
  40. timeout=7200,
  41. )
  42. log = (result.stdout + result.stderr).strip()
  43. if result.returncode != 0:
  44. raise RuntimeError(f"mysqldump a échoué (code {result.returncode}) :\n{log}")
  45. _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job,
  46. instance, current_app.config.get("INSTANCE_URL", ""))
  47. return archive_name, log or "mysqldump terminé sans sortie."
  48. # ---------------------------------------------------------------------------
  49. # PostgreSQL
  50. # ---------------------------------------------------------------------------
  51. def _run_postgresql(job, instance, backup_dir):
  52. from flask import current_app
  53. cfg = json.loads(job.config_json or "{}")
  54. dbname = cfg.get("database", "")
  55. if not dbname:
  56. raise ValueError("Nom de base de données manquant dans la configuration du job.")
  57. archive_name = _archive_name(instance, "postgresql", dbname)
  58. _abort_if_exists(archive_name, backup_dir)
  59. with tempfile.TemporaryDirectory() as tmpdir:
  60. dump_path = os.path.join(tmpdir, f"{dbname}.sql")
  61. # pg_dump doit tourner en tant qu'utilisateur postgres
  62. result = subprocess.run(
  63. ["sudo", "-u", "postgres", "pg_dump", "--format=plain", dbname],
  64. capture_output=True,
  65. timeout=7200,
  66. )
  67. if result.returncode != 0:
  68. log = result.stderr.decode("utf-8", errors="replace").strip()
  69. raise RuntimeError(f"pg_dump a échoué (code {result.returncode}) :\n{log}")
  70. with open(dump_path, "wb") as f:
  71. f.write(result.stdout)
  72. log = result.stderr.decode("utf-8", errors="replace").strip()
  73. _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job,
  74. instance, current_app.config.get("INSTANCE_URL", ""))
  75. return archive_name, log or "pg_dump terminé sans sortie."
  76. # ---------------------------------------------------------------------------
  77. # Helpers partagés
  78. # ---------------------------------------------------------------------------
  79. def _archive_name(instance, db_type, dbname):
  80. date_str = datetime.utcnow().strftime("%Y%m%d")
  81. return f"{instance}_{db_type}_{dbname}_{date_str}"
  82. def _abort_if_exists(archive_name, backup_dir):
  83. path = os.path.join(backup_dir, archive_name + ".tar")
  84. if os.path.exists(path):
  85. raise RuntimeError(
  86. f"L'archive {archive_name}.tar existe déjà. "
  87. "Supprimez-la ou attendez le prochain cycle."
  88. )
  89. def _write_tar(tmpdir, dump_path, dbname, archive_name, backup_dir, job, instance, instance_url):
  90. """Crée le .tar dans backup_dir et le .info.json YunoHost à côté."""
  91. archive_path = os.path.join(backup_dir, archive_name + ".tar")
  92. # backup_info.json embarqué dans le tar (métadonnées BackupManager)
  93. import json as _json
  94. info = {
  95. "instance_name": instance,
  96. "instance_url": instance_url,
  97. "type": job.type,
  98. "database": dbname,
  99. "created_at": datetime.utcnow().isoformat(),
  100. "backupmanager_version": "1.0.0",
  101. }
  102. info_path = os.path.join(tmpdir, "backup_info.json")
  103. with open(info_path, "w") as f:
  104. _json.dump(info, f, indent=2)
  105. with tarfile.open(archive_path, "w") as tar:
  106. tar.add(dump_path, arcname=f"db/{dbname}.sql")
  107. tar.add(info_path, arcname="backup_info.json")
  108. # .info.json YunoHost (hors tar, requis pour listing webadmin)
  109. size = os.path.getsize(archive_path)
  110. ynh_info = {
  111. "created_at": int(time.time()),
  112. "description": f"BackupManager: {job.type} {dbname}",
  113. "size": size,
  114. "from_before_upgrade": False,
  115. "apps": {},
  116. "system": {},
  117. }
  118. ynh_info_path = os.path.join(backup_dir, archive_name + ".info.json")
  119. with open(ynh_info_path, "w") as f:
  120. _json.dump(ynh_info, f, indent=2)