Network stack reworked and some cleanup

master
Meliurwen 4 years ago
parent d09238210c
commit 14c866845f
Signed by: meliurwen
GPG Key ID: 818A8B35E9F1CE10
  1. 17
      .env.example
  2. 21
      docker-compose.yml
  3. 9
      shared.env.example

@ -1,9 +1,14 @@
# Global Settings
TZ=Etc/UTC
LOCAL_STACK_DIR=/srv/docker/volumes/funkwhale
HOST_MUSIC_DIRECTORY_SERVE_PATH=/srv/docker/volumes/funkwhale/media/import_music
HOST_DB_DIR=/srv/docker/volumes/funkwhale/postgres/data
HOST_CACHE_DIR=/srv/docker/volumes/funkwhale/redis/data
# In-place import dir
HOST_MUSIC_DIRECTORY_SERVE_PATH=/srv/docker/volumes/funkwhale/media/music
# Where media files (such as album covers or audio tracks) should be stored
HOST_MEDIA_ROOT=/srv/docker/volumes/funkwhale/media/media
# Where static files (such as API css or icons) should be compiled
HOST_STATIC_ROOT=/srv/docker/volumes/funkwhale/api/static
# Why does this exist?
HOST_FUNKWHALE_FRONTEND_PATH=/srv/docker/volumes/funkwhale/api/front/dist
# Replace this by the definitive, public domain you will use for
# your instance. It cannot be changed after initial deployment
@ -38,11 +43,11 @@ WB_RESTART=
# celeryworker (optional)
CW_CONTAINER_NAME=
CW_RESTART=
# Number of worker processes to execute. Defaults to 0, in which case it uses your number of CPUs
# Number of worker processes to execute.
# Defaults to 0, in which case it uses your number of CPUs.
# Celery workers handle background tasks (such as file imports or federation
# messaging). The more processes a worker gets, the more tasks
# can be processed in parallel. However, more processes also means
# a bigger memory footprint.
# messaging). The more processes a worker gets, the more tasks can be processed
# in parallel. However, more processes also means a bigger memory footprint.
CELERYD_CONCURRENCY=
# celerybeat (optional)

@ -6,7 +6,7 @@ services:
container_name: ${DB_CONTAINER_NAME:-funkwhale-db}
restart: ${DB_RESTART:-unless-stopped}
volumes:
- ${LOCAL_STACK_DIR}/postgres/data:/var/lib/postgresql/data
- ${HOST_DB_DIR}:/var/lib/postgresql/data
expose:
- 5432
env_file:
@ -17,7 +17,7 @@ services:
container_name: ${RD_CONTAINER_NAME:-funkwhale-cache}
restart: ${RD_RESTART:-unless-stopped}
volumes:
- ${LOCAL_STACK_DIR}/redis/data:/data
- ${HOST_CACHE_DIR}:/data
expose:
- 6379
@ -25,6 +25,9 @@ services:
image: funkwhale/funkwhale:${FUNKWHALE_VERSION:-latest}
container_name: ${CW_CONTAINER_NAME:-funkwhale-celeryworker}
restart: ${CW_RESTART:-unless-stopped}
networks:
- default
- internet-access
depends_on:
- postgres
- redis
@ -37,14 +40,6 @@ services:
environment:
- C_FORCE_ROOT=true
- FUNKWHALE_HOSTNAME=${FUNKWHALE_HOSTNAME}
# Celery workers handle background tasks (such file imports or federation
# messaging). The more processes a worker gets, the more tasks
# can be processed in parallel. However, more processes also means
# a bigger memory footprint.
# By default, a worker will span a number of process equal to your number
# of CPUs. You can adjust this, by explicitly setting the --concurrency
# flag:
# celery -A funkwhale_api.taskapp worker -l INFO --concurrency=4
command: celery -A funkwhale_api.taskapp worker -l INFO --concurrency=${CELERYD_CONCURRENCY:-0}
celerybeat:
@ -56,7 +51,6 @@ services:
- redis
env_file:
- all.env
- shared.env # Probably this is not necessary
environment:
- FUNKWHALE_HOSTNAME=${FUNKWHALE_HOSTNAME} # This is necessary
command: celery -A funkwhale_api.taskapp beat --pidfile= -l INFO
@ -65,6 +59,9 @@ services:
image: funkwhale/funkwhale:${FUNKWHALE_VERSION:-latest}
container_name: ${AP_CONTAINER_NAME:-funkwhale-api}
restart: ${AP_RESTART:-unless-stopped}
networks:
- default
- internet-access
depends_on:
- postgres
- redis
@ -112,6 +109,8 @@ services:
networks:
default:
internal: true
internet-access:
main-webservices:
external:
name: ${NETWORK}

@ -0,0 +1,9 @@
# Do not edit these settings.
# They exist only because Funkwhale requires them to be declared; the in-container
# paths below must agree with the volume mount targets in docker-compose.yml.
# NOTE(review): "/srv/wwww" (four w's) looks like a typo for "/srv/www" — confirm
# against the container-side mount paths in docker-compose.yml before changing.
MUSIC_DIRECTORY_PATH=/srv/wwww/music
MUSIC_DIRECTORY_SERVE_PATH=/srv/wwww/music
MEDIA_ROOT=/srv/wwww/media
STATIC_ROOT=/srv/wwww/static
FUNKWHALE_FRONTEND_PATH=/srv/wwww/frontend
Loading…
Cancel
Save