# Mirror of https://github.com/immich-app/immich.git
# Synced: 2024-11-15 09:59:00 -07:00
# Commit: 7404688622
# Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
version: "3.8"

#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#

name: immich

services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    command: [ "start.sh", "immich" ]
    volumes:
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      # Quoted to avoid YAML scalar traps with colon-separated port mappings.
      - "2283:3001"
    depends_on:
      - redis
      - database
    restart: always

  immich-microservices:
    container_name: immich_microservices
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/hardware-transcoding
    #   file: hwaccel.transcoding.yml
    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    command: [ "start.sh", "microservices" ]
    volumes:
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    depends_on:
      - redis
      - database
    restart: always

  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: always

  redis:
    container_name: immich_redis
    # Image pinned by digest for reproducible deployments.
    image: redis:6.2-alpine@sha256:60727c1c07f014f3ee3f90f4326792c39dcea8b3fd118be467f0b5e2b75a455d
    restart: always

  database:
    container_name: immich_postgres
    # Postgres 14 with the pgvecto.rs vector-search extension, pinned by digest.
    image: tensorchord/pgvecto-rs:pg14-v0.1.11@sha256:0335a1a22f8c5dd1b697f14f079934f5152eaaa216c09b61e293be285491f8ee
    env_file:
      - .env
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
    volumes:
      - pgdata:/var/lib/postgresql/data
    restart: always

volumes:
  pgdata:
  model-cache: