Skip to content
Open
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ Contributions are welcome regardless of experience level.

## Python environment

Use [`uv`](https://docs.astral.sh/uv/) within the `src/` directory to manage your development environment.
Use [`uv`](https://docs.astral.sh/uv/) within the repo root directory to manage your development environment.

```bash
git clone https://github.com/lawndoc/stack-back.git
Expand Down
11 changes: 9 additions & 2 deletions src/entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,13 @@ rcb dump-env > /.env
# Write crontab
rcb crontab > crontab

# start cron in the foreground
# Start cron in the background and capture its PID
crontab crontab
crond -f
crond -f &
CRON_PID=$!

# Trap termination signals and kill the cron process
trap 'kill $CRON_PID; exit 0' TERM INT

# Wait for cron and handle signals
wait $CRON_PID
5 changes: 2 additions & 3 deletions src/restic_compose_backup/backup_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ def run(
volumes: dict = None,
environment: dict = None,
labels: dict = None,
source_container_id: str = None,
source_container_network: str = None,
Copy link
Owner

@lawndoc lawndoc Oct 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We'll need to pass in the list of networks for the runner to be attached to so it can reach all the containers it is backing up.

Suggested change
source_container_network: str = None,
source_container_network: str = None,
backup_networks: list = None,

):
logger.info("Starting backup container")
client = utils.docker_client()
Expand All @@ -21,11 +21,10 @@ def run(
image,
command,
labels=labels,
# auto_remove=True, # We remove the container further down
detach=True,
environment=environment + ["BACKUP_PROCESS_CONTAINER=true"],
volumes=volumes,
network_mode=f"container:{source_container_id}", # Reuse original container's network stack.
network=source_container_network,
working_dir=os.getcwd(),
tty=True,
)
Expand Down
4 changes: 2 additions & 2 deletions src/restic_compose_backup/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def status(config, containers):
logger.info("-" * 67)


def backup(config, containers):
def backup(config, containers: RunningContainers):
"""Request a backup to start"""
# Make sure we don't spawn multiple backup processes
if containers.backup_process_running:
Expand Down Expand Up @@ -169,7 +169,7 @@ def backup(config, containers):
command="rcb start-backup-process",
volumes=volumes,
environment=containers.this_container.environment,
source_container_id=containers.this_container.id,
source_container_network=containers.this_container.network_name,
Copy link
Owner

@lawndoc lawndoc Oct 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We'll have to pass the backup_networks in here from the suggested new RunningContainers.networks_for_backup method

Suggested change
source_container_network=containers.this_container.network_name,
source_container_network=containers.this_container.network_name,
backup_networks=containers.networks_for_backup(),

labels={
containers.backup_process_label: "True",
"com.docker.compose.project": containers.project_name,
Expand Down
3 changes: 3 additions & 0 deletions src/restic_compose_backup/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@ def __init__(self, check=True):
self.swarm_mode = os.environ.get("SWARM_MODE") or False
self.include_project_name = os.environ.get("INCLUDE_PROJECT_NAME") or False
self.exclude_bind_mounts = os.environ.get("EXCLUDE_BIND_MOUNTS") or False
self.include_all_compose_projects = (
os.environ.get("INCLUDE_ALL_COMPOSE_PROJECTS") or False
)
self.include_all_volumes = os.environ.get("INCLUDE_ALL_VOLUMES") or False
if self.include_all_volumes:
logger.warning(
Expand Down
46 changes: 28 additions & 18 deletions src/restic_compose_backup/containers.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import os
import logging
from pathlib import Path
import socket
from typing import List

from restic_compose_backup import enums, utils
Expand Down Expand Up @@ -57,9 +57,22 @@ def id(self) -> str:
return self._data.get("Id")

@property
def hostname(self) -> str:
"""Hostname of the container"""
return self.get_config("Hostname", default=self.id[0:12])
def network_details(self) -> dict:
    """dict: Settings of the first network this container is attached to.

    Reads ``NetworkSettings.Networks`` from the container's inspect data
    and returns the first network's detail mapping (insertion order of the
    inspect payload).  Returns an empty dict when the container is not
    attached to any network (e.g. ``network_mode: none``) instead of
    raising ``IndexError`` as the previous implementation did.
    """
    network_settings: dict = self._data.get("NetworkSettings", {})
    networks: dict = network_settings.get("Networks", {})
    if not networks:
        # No attached networks -> empty details; callers using .get(...)
        # on the result then fall back to their defaults ("").
        return {}
    return next(iter(networks.values()))
Comment on lines +60 to +65
Copy link
Owner

@lawndoc lawndoc Oct 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This will need to return the full list of networks to be passed into the backup runner startup step later so we can call Network.connect() to add our backup runner to each network.

Suggested change
def network_details(self) -> dict:
"""dict: The network details of the container"""
network_settings: dict = self._data.get("NetworkSettings", {})
networks: dict = network_settings.get("Networks", {})
first_network = list(networks.values())[0]
return first_network
def network_details(self) -> list:
"""list: The network details of the container"""
network_settings: dict = self._data.get("NetworkSettings", {})
networks: dict = network_settings.get("Networks", {})
return list(networks.values())


@property
def network_name(self) -> str:
    """str: Identifier of the network the container is connected to.

    NOTE(review): despite the property name, the value returned is the
    Docker ``NetworkID`` field, not the human-readable network name —
    confirm callers expect an ID.  Empty string when not networked.
    """
    details = self.network_details
    return details.get("NetworkID", "")
Comment on lines +67 to +70
Copy link
Owner

@lawndoc lawndoc Oct 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is still needed for the initial network creation, but you will need to adapt this to select the first network within this function since network_details is now returning a list.

Suggested change
@property
def network_name(self) -> str:
"""str: The name of the network the container is connected to"""
return self.network_details.get("NetworkID", "")
@property
def network_name(self) -> str:
"""str: The name of the network the container is connected to"""
return self.network_details[0].get("NetworkID", "")


@property
def ip_address(self) -> str:
    """str: IP address of the container on its network.

    Empty string when the network details carry no ``IPAddress`` entry.
    """
    details = self.network_details
    return details.get("IPAddress", "")
Comment on lines +72 to +75
Copy link
Owner

@lawndoc lawndoc Oct 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can adapt this to select the first network within this function since network_details is now returning a list

Suggested change
@property
def ip_address(self) -> str:
"""str: IP address of the container"""
return self.network_details.get("IPAddress", "")
@property
def ip_address(self) -> str:
"""str: IP address of the container"""
return self.network_details[0].get("IPAddress", "")


@property
def image(self) -> str:
Expand Down Expand Up @@ -407,13 +420,13 @@ def __init__(self):
# Find the container we are running in.
# If we don't have this information we cannot continue
for container_data in all_containers:
if container_data.get("Id").startswith(os.environ["HOSTNAME"]):
if container_data.get("Id").startswith(socket.gethostname()):
self.this_container = Container(container_data)

if not self.this_container:
raise ValueError("Cannot find metadata for backup container")

# Gather all running containers in the current compose setup
# Gather relevant containers
for container_data in all_containers:
container = Container(container_data)

Expand All @@ -429,25 +442,22 @@ def __init__(self):
if not container.is_running:
continue

# If not swarm mode we need to filter in compose project
if (
not config.swarm_mode
and not config.include_all_compose_projects
and container.project_name != self.this_container.project_name
):
continue

# Gather stop during backup containers
if container.stop_during_backup:
if config.swarm_mode:
self.stop_during_backup_containers.append(container)
else:
if container.project_name == self.this_container.project_name:
self.stop_during_backup_containers.append(container)
self.stop_during_backup_containers.append(container)

# Detect running backup process container
if container.is_backup_process_container:
self.backup_process_container = container

# --- Determine what containers should be evaluated

# If not swarm mode we need to filter in compose project
if not config.swarm_mode:
if container.project_name != self.this_container.project_name:
continue

# Containers started manually are not included
if container.is_oneoff:
continue
Expand Down
6 changes: 3 additions & 3 deletions src/restic_compose_backup/containers_db.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def get_credentials(self) -> dict:
username = self.get_config_env("MARIADB_USER")
password = self.get_config_env("MARIADB_PASSWORD")
return {
"host": self.hostname,
"host": self.ip_address,
"username": username,
"password": password,
"port": "3306",
Expand Down Expand Up @@ -91,7 +91,7 @@ def get_credentials(self) -> dict:
username = self.get_config_env("MYSQL_USER")
password = self.get_config_env("MYSQL_PASSWORD")
return {
"host": self.hostname,
"host": self.ip_address,
"username": username,
"password": password,
"port": "3306",
Expand Down Expand Up @@ -155,7 +155,7 @@ class PostgresContainer(Container):
def get_credentials(self) -> dict:
"""dict: get credentials for the service"""
return {
"host": self.hostname,
"host": self.ip_address,
"username": self.get_config_env("POSTGRES_USER"),
"password": self.get_config_env("POSTGRES_PASSWORD"),
"port": "5432",
Expand Down
4 changes: 0 additions & 4 deletions src/restic_compose_backup/log.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
import logging
import os
import sys

logger = logging.getLogger("restic_compose_backup")
HOSTNAME = os.environ["HOSTNAME"]

DEFAULT_LOG_LEVEL = logging.INFO
LOG_LEVELS = {
Expand All @@ -22,7 +20,5 @@ def setup(level: str = "warning"):

ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(level)
# ch.setFormatter(logging.Formatter('%(asctime)s - {HOSTNAME} - %(name)s - %(levelname)s - %(message)s'))
# ch.setFormatter(logging.Formatter('%(asctime)s - {HOSTNAME} - %(levelname)s - %(message)s'))
ch.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s: %(message)s"))
logger.addHandler(ch)
20 changes: 13 additions & 7 deletions src/tests/tests.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import json
import os
import unittest
from unittest import mock
Expand All @@ -16,16 +15,21 @@
class BaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Set up basic environment variables"""
# os.environ['RESTIC_REPOSITORY'] = "test"
# os.environ['RESTIC_PASSWORD'] = "password"
cls.backup_hash = fixtures.generate_sha256()

cls.hostname_patcher = mock.patch(
"socket.gethostname", return_value=cls.backup_hash[:8]
)
cls.hostname_patcher.start()

@classmethod
def tearDownClass(cls):
cls.hostname_patcher.stop()

def createContainers(self):
backup_hash = fixtures.generate_sha256()
os.environ["HOSTNAME"] = backup_hash[:8]
return [
{
"id": backup_hash,
"id": self.backup_hash,
"service": "backup",
}
]
Expand Down Expand Up @@ -377,10 +381,12 @@ def test_stop_container_during_backup_database(self):
class IncludeAllVolumesTests(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
config.config.auto_backup_all = "true"

@classmethod
def tearDownClass(cls):
super().tearDownClass()
config.config = config.Config()

def test_all_volumes(self):
Expand Down