Configuration reference

There are four sections:

  • Accesses: Describes authorization details; name an entry (e.g. server1) and provide the url and token
  • Encryption: Encryption method and passphrase (if any) used to encrypt your files stored on File Repository
  • Backups: Describes where your data is, how to access it, and to which COLLECTION on File Repository it should be sent
  • Recoveries: Recovery plans; a policy plus a list of backups to restore with a single command

Example scenario:

  1. You have a server at https://backups.iwa-ait.org with token “XXX-YYY-ZZZ-123”; you name it “ait_backups” in the accesses section
  2. You want encrypted backups using AES-256-CBC, so you add “ait_secret” under encryption with passphrase “something-secret” and method “aes-256-cbc”
  3. Finally, you define where the data is; in our example it lives in a Docker container under /var/lib/mysql and should be sent to collection “123-456-789-000”. In that backup definition you reference “ait_backups” as the access and “ait_secret” as the encryption (see the sketch below).
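
Putting the example scenario together, the configuration could look more or less like the sketch below (the backup name and container name are placeholders; the url, token, passphrase and collection id come from the scenario above):

accesses:
    ait_backups:
        url: https://backups.iwa-ait.org
        token: XXX-YYY-ZZZ-123

encryption:
    ait_secret:
        passphrase: something-secret
        method: aes-256-cbc

backups:
    mysql_data:
        type: docker_volumes              # offline backup: stops the container, copies the data, starts it again
        container: "my_mysql_container"   # placeholder - put your real container name here
        access: ait_backups               # reference to the access defined above
        encryption: ait_secret            # reference to the encryption defined above
        collection_id: "123-456-789-000"
        paths:
            - /var/lib/mysql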

Environment variables

To use environment variables in the configuration, use the bash-like syntax ${SOME_ENV_NAME}.

NOTE: If a referenced variable is not set in the shell, the application will not start; it will throw a configuration error.
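
For example, a variable exported in the shell can be used inside any configuration value. Below is a minimal sketch assuming a hypothetical DB_PASSWORD variable; backup_one and ${COLLECTION_ID} refer to entries used in the full example later in this document:

# in the shell, before running bahub:
#   export DB_PASSWORD="my-secret-password"

backups:
    env_example:
        type: mysql
        host: localhost
        port: 3306
        user: root
        password: "${DB_PASSWORD}"        # substituted from the shell environment
        database: mydb
        access: backup_one
        encryption: none
        collection_id: "${COLLECTION_ID}"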

Application configuration

Notice: The example below uses environment variables, e.g. ${DB_HOST}; you may want to replace them with literal values such as localhost.

#
# Access information for the storage server (File Repository): the URL and token
# In the "backups" section use the name of an access as a reference in the "access" field
#
accesses:
    backup_one:
        url: http://localhost:8000
        token: test-token-full-permissions

#
# Allows defining an encryption method and identifying it by name
# In the "backups" section use just the name as a reference in the "encryption" key of a backup
#
encryption:

    #
    # Example of AES-256-CBC encryption (uses OpenSSL)
    #
    #   Decryption command example:
    #     cat 0ad166b4cfbackup.tar-v5.gz | openssl enc -aes-256-cbc -pbkdf2 -d -pass pass:some-encryption-key-here > ./backup.tar.gz
    #
    enc1:
        passphrase: some-encryption-key-here
        method: aes-256-cbc      # possible values: aes-256-cbc, aes-256-ecb, aes-128-cbc, aes-128-ecb, des-ecb
        #encrypt_cmd: openssl enc -%method% -pass pass:%pass%
        #decrypt_cmd: openssl enc -d -%method% -pass pass:%pass%

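    #
    # No encryption - data is sent to File Repository as-is
    #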
    none:
        passphrase: ""
        method: ""


#
# Backups - a list of backups; each backup = a single archive = a single collection.
#           To back up, invoke the shell command with a backup name, e.g. bahub backup www_uploads
#           To restore, invoke e.g. bahub restore www_uploads latest
#
backups:

    #
    # Online / hot-backup of a docker container (example)
    #
    #   Copies data from a running container.
    #   NOTICE: This may be dangerous to your data; make sure you know what you are doing.
    #           In most cases choose the "docker_volumes" method for a safe, offline backup.
    #
    docker_hot_volumes_example:
        type: docker_hot_volumes
        container: "test_www"
        access: backup_one
        encryption: none
        collection_id: "${COLLECTION_ID}"
        paths:
            - /var/www
            - /var/log

        # optional
        #tar_pack_cmd: "tar -czf %stdin% %paths%"
        #tar_unpack_cmd: "tar -xzf %stdin% %target%"
        #docker_bin: "sudo docker"

    #
    # Offline backup of docker container
    #
    #   Stops the container, copies the data, then starts it again.
    #   A fully safe backup method for all types of services. It causes downtime, but guarantees non-corrupted
    #   backup data.
    #
    www_docker_offline:
        type: docker_volumes
        container: "test_www"
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"
        paths:
            - /etc
            - /var/lib/mysql
            - /var/log

        # optional
        docker_bin: "sudo docker"
        tar_pack_cmd: "tar -czf %stdin% %paths%"
        tar_unpack_cmd: "tar -xzf %stdin% %target%"
        temp_image_name: "alpine:3.9"
        temp_image_cmd: "apk add --update xz bzip2 && sleep 3600"

    #
    # MySQL online backup using mysqldump
    #
    #   Can back up a single database or all databases from any MySQL server: local, in-docker or remote.
    #   Notice: Restoring backups is only possible for SINGLE DATABASES
    #
    mysql_native_single_database:
        type: mysql
        host: "${DB_HOST}"
        port: 3306
        user: root
        password: root
        database: "${DB_DATABASE}"
        access: backup_one
        encryption: none
        collection_id: "${COLLECTION_ID}"

    #
    # MySQL online backup using mysqldump (DOCKER)
    #
    #   Can back up a single database or all databases from any MySQL server: local, in-docker or remote.
    #   Notice: Restoring backups is only possible for SINGLE DATABASES
    #
    mysql_docker_single_database:
        type: mysql
        host: localhost
        port: 3306
        user: root
        password: root
        database: "${DB_DATABASE}"
        container: "test_mysql"    # required in order to back up from a docker container
        access: backup_one
        encryption: none
        collection_id: "${COLLECTION_ID}"

        # optional:
        #tar_pack_cmd: "tar -czf %stdin% %paths%"
        #tar_unpack_cmd: "tar -xzf %stdin% %target%"

    #
    # Docker command output online backup
    #
    #   Executes a command in a Docker container and captures its output
    #
    docker_command_output:
        type: docker_output
        container: "test_www"
        command: "cat /bin/sh"
        restore_command: "cat - > /tmp/sh.restored"
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"

        # optional
        #docker_bin: "sudo docker"

    #
    # PostgreSQL backup using pg_dumpall. For restore, psql is used.
    #
    postgres:
        type: postgres
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"
        host: "localhost"
        port: 5432
        user: "${POSTGRES_USER}"
        password: "${POSTGRES_PASSWORD}"
        database: ""

    #
    # PostgreSQL backup using pg_dump to dump a single database. For restore, psql is used.
    #
    postgres_single_db:
        type: postgres
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"
        host: "localhost"
        port: 5432
        user: "${POSTGRES_USER}"
        password: "${POSTGRES_PASSWORD}"
        database: "some_db_name"

    #
    # PostgreSQL backup using pg_basebackup command (replication mode backup)
    #
    postgres_base:
        type: postgres_base
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"
        host: "localhost"
        port: 5432
        user: "${POSTGRES_USER}"
        password: "${POSTGRES_PASSWORD}"
        database: "${POSTGRES_DB}"

        server_shutdown_cmd: "sudo docker stop fr_tests_postgresql_1" # pg_ctl or systemctl can also be used here
        server_start_cmd: "sudo docker start fr_tests_postgresql_1"
        restore_dir: "${CONFIG_DIR}/.data/postgresql"
        old_dir: "${CONFIG_DIR}/.data/postgresql-old"
        ownership: "70:70"   # postgres user id and group; names can also be used. For docker containers it is recommended to use ids

    #
    # Execute a local command
    #
    #   Executes a command on this machine/environment in the local shell and captures its output
    #
    local_command_output:
        type: command_output
        command: "cat /bin/bash"
        restore_command: "cat - > /tmp/bash.restored"
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"

    #
    # Local file/directory backup
    #
    #   Packs a local directory or file
    #
    some_local_dir:
        type: directory
        paths:
            - /tmp/test
        access: backup_one
        encryption: enc1
        collection_id: "${COLLECTION_ID}"

        # optional
        #tar_pack_cmd: "tar -czf %stdin% %paths%"
        #tar_unpack_cmd: "tar -xzf %stdin% %target%"

#
# Recovery plans
#   Restores multiple backups, in order, using a single command
#
#   Possible values:
#     policy:
#       - restore-whats-possible: Ignore items that cannot be restored and restore what is possible
#       - stop-on-first-error: Restore until the first error, then leave things as they are
#
recoveries:
    default:
        policy: restore-whats-possible
        definitions: all

    some_selected_only:
        policy: stop-on-first-error
        definitions:
            - some_local_dir
            - local_command_output

#
# Optional: Error handlers, very helpful for tracking backup errors
#           Be careful! Your shell commands for backup/restore may fail from time to time; it's worth having a notification.
#           You can set up a free account on sentry.io, install a Sentry instance on your own server, or use e.g. Mattermost/Slack notifications
#
#error_handlers:
#    remote_sentry:
#        type: sentry
#        url: "${SENTRY_IO}"


#
# Optional: Notifications. Can notify when a backup starts, ends, fails with an error, etc.
#

#notifiers:
#    mattermost:
#        type: slack
#        url: "https://xxxxx"