x-shared-env : &shared-api-worker-env
# The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL : ${LOG_LEVEL:-INFO}
# Debug mode, default is false. It is recommended to turn on this configuration for local development to prevent some problems caused by monkey patch.
DEBUG : ${DEBUG:-false}
# Flask debug mode, it can output trace information at the interface when turned on, which is convenient for debugging.
FLASK_DEBUG : ${FLASK_DEBUG:-false}
# A secret key used for securely signing the session cookie and encrypting sensitive information in the database. You can generate a strong key using `openssl rand -base64 42`.
SECRET_KEY : ${SECRET_KEY}
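# For example, a minimal way to generate a key and persist it in the neighbouring .env file
# (illustrative shell snippet; the exact .env location depends on your setup):
#   echo "SECRET_KEY=$(openssl rand -base64 42)" >> .env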
# Password for admin user initialization.
# If left unset, the admin user will not be prompted for a password when creating the initial admin account.
INIT_PASSWORD : ${INIT_PASSWORD}
# The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain.
# example: http://cloud.dify.ai
CONSOLE_WEB_URL : ${CONSOLE_WEB_URL}
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain.
# example: http://cloud.dify.ai
CONSOLE_API_URL : ${CONSOLE_API_URL}
# The URL prefix for Service API endpoints, refers to the base URL of the current API service if api domain is
# different from console domain.
# example: http://api.dify.ai
SERVICE_API_URL : ${SERVICE_API_URL}
# The URL prefix for Web APP frontend, refers to the Web App base URL of WEB service if web app domain is different from
# console or api domain.
# example: http://udify.app
APP_WEB_URL : ${APP_WEB_URL}
# The URL used for version checks. If left empty, https://updates.dify.ai will not be called to check for updates.
CHECK_UPDATE_URL : ${CHECK_UPDATE_URL}
# Used to change the OpenAI base address, default is https://api.openai.com/v1.
# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
# or when a local model provides OpenAI compatible API, it can be replaced.
OPENAI_API_BASE : ${OPENAI_API_BASE}
# The URL prefix for file preview or download.
# It is used to display file previews or download links on the front end, or to pass files as multi-modal model inputs;
# the URL is signed and has an expiration time.
FILES_URL : ${FILES_URL}
# File access timeout: the time window, in seconds, during which an uploaded file (and its signed URL) can be accessed.
# The default value is 300 seconds.
FILES_ACCESS_TIMEOUT : ${FILES_ACCESS_TIMEOUT:-300}
# When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
MIGRATION_ENABLED : ${MIGRATION_ENABLED:-true}
# Deployment environment.
# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
# In the `TESTING` environment, a distinct color label is shown on the front-end page
# to indicate that this is a testing environment.
DEPLOY_ENV : ${DEPLOY_ENV:-PRODUCTION}
# API service binding address, default: 0.0.0.0, i.e., listen on all network interfaces.
DIFY_BIND_ADDRESS : ${DIFY_BIND_ADDRESS}
# API service binding port number, default 5001.
DIFY_PORT : ${DIFY_PORT}
# The number of API server workers, i.e., the number of gevent workers.
# Formula: number of cpu cores x 2 + 1
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
SERVER_WORKER_AMOUNT : ${SERVER_WORKER_AMOUNT}
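# For example, on a machine with 4 CPU cores the formula above gives 4 x 2 + 1 = 9,
# so SERVER_WORKER_AMOUNT=9 is a reasonable starting point.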
# Defaults to gevent. If using Windows, it can be switched to sync or solo.
SERVER_WORKER_CLASS : ${SERVER_WORKER_CLASS}
# Similar to SERVER_WORKER_CLASS. Default is gevent.
# If using Windows, it can be switched to sync or solo.
CELERY_WORKER_CLASS : ${CELERY_WORKER_CLASS}
# Request handling timeout. The default is 200;
# it is recommended to set it to 360 to support longer SSE connection times.
GUNICORN_TIMEOUT : ${GUNICORN_TIMEOUT}
# The number of Celery workers. The default is 1, and can be set as needed.
CELERY_WORKER_AMOUNT : ${CELERY_WORKER_AMOUNT}
# The configurations of postgres database connection.
# It is consistent with the configuration in the 'db' service below.
DB_USERNAME : ${DB_USERNAME}
DB_PASSWORD : ${DB_PASSWORD}
DB_HOST : ${DB_HOST}
DB_PORT : ${DB_PORT}
DB_DATABASE : ${DB_DATABASE}
# The size of the database connection pool.
# The default is 30 connections, which can be appropriately increased.
SQLALCHEMY_POOL_SIZE : ${SQLALCHEMY_POOL_SIZE}
# Database connection pool recycling time, the default is 3600 seconds.
SQLALCHEMY_POOL_RECYCLE : ${SQLALCHEMY_POOL_RECYCLE}
# Whether to print SQL, default is false.
SQLALCHEMY_ECHO : ${SQLALCHEMY_ECHO}
# The configurations of redis connection.
# It is consistent with the configuration in the 'redis' service below.
REDIS_HOST : ${REDIS_HOST}
REDIS_PORT : ${REDIS_PORT:-6379}
REDIS_USERNAME : ${REDIS_USERNAME}
REDIS_PASSWORD : ${REDIS_PASSWORD}
REDIS_USE_SSL : ${REDIS_USE_SSL}
# Redis database index, default is 0. Please use a database index different from the ones used for Session Redis and the Celery broker.
REDIS_DB : 0
# The configurations of celery broker.
# Use redis as the broker, and redis db 1 for celery broker.
CELERY_BROKER_URL : ${CELERY_BROKER_URL}
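# A typical value, assuming the bundled redis service below with its default password and
# redis db 1 as described above, would look like (illustrative only):
#   CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1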
BROKER_USE_SSL : ${BROKER_USE_SSL}
# Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
WEB_API_CORS_ALLOW_ORIGINS : ${WEB_API_CORS_ALLOW_ORIGINS}
# Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
CONSOLE_CORS_ALLOW_ORIGINS : ${CONSOLE_CORS_ALLOW_ORIGINS}
# The type of storage to use for storing user files. Supported values are `local`, `s3`, `azure-blob`, `google-storage`, `aliyun-oss`, and `tencent-cos`. Default: `local`.
STORAGE_TYPE : ${STORAGE_TYPE}
# The path to the local storage directory, either relative to the root path of the API service code or an absolute path. Default: `storage` (relative), e.g. `/home/john/storage` as an absolute path.
# Only available when STORAGE_TYPE is `local`.
STORAGE_LOCAL_PATH : storage
# The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
S3_USE_AWS_MANAGED_IAM : ${S3_USE_AWS_MANAGED_IAM}
S3_ENDPOINT : ${S3_ENDPOINT}
S3_BUCKET_NAME : ${S3_BUCKET_NAME}
S3_ACCESS_KEY : ${S3_ACCESS_KEY}
S3_SECRET_KEY : ${S3_SECRET_KEY}
S3_REGION : ${S3_REGION}
# The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
AZURE_BLOB_ACCOUNT_NAME : ${AZURE_BLOB_ACCOUNT_NAME}
AZURE_BLOB_ACCOUNT_KEY : ${AZURE_BLOB_ACCOUNT_KEY}
AZURE_BLOB_CONTAINER_NAME : ${AZURE_BLOB_CONTAINER_NAME}
AZURE_BLOB_ACCOUNT_URL : ${AZURE_BLOB_ACCOUNT_URL}
# The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
GOOGLE_STORAGE_BUCKET_NAME : ${GOOGLE_STORAGE_BUCKET_NAME}
# If you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 : ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64}
# The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
ALIYUN_OSS_BUCKET_NAME : ${ALIYUN_OSS_BUCKET_NAME}
ALIYUN_OSS_ACCESS_KEY : ${ALIYUN_OSS_ACCESS_KEY}
ALIYUN_OSS_SECRET_KEY : ${ALIYUN_OSS_SECRET_KEY}
ALIYUN_OSS_ENDPOINT : ${ALIYUN_OSS_ENDPOINT}
ALIYUN_OSS_REGION : ${ALIYUN_OSS_REGION}
ALIYUN_OSS_AUTH_VERSION : ${ALIYUN_OSS_AUTH_VERSION}
# The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
TENCENT_COS_BUCKET_NAME : ${TENCENT_COS_BUCKET_NAME}
TENCENT_COS_SECRET_KEY : ${TENCENT_COS_SECRET_KEY}
TENCENT_COS_SECRET_ID : ${TENCENT_COS_SECRET_ID}
TENCENT_COS_REGION : ${TENCENT_COS_REGION}
TENCENT_COS_SCHEME : ${TENCENT_COS_SCHEME}
# The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, `opensearch`, `tidb_vector`.
VECTOR_STORE : ${VECTOR_STORE}
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT : ${WEAVIATE_ENDPOINT}
# The Weaviate API key.
WEAVIATE_API_KEY : ${WEAVIATE_API_KEY}
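# Note: when using the `weaviate` service defined later in this file, WEAVIATE_ENDPOINT is
# typically http://weaviate:8080, and this key should match one of the keys listed in that
# service's AUTHENTICATION_APIKEY_ALLOWED_KEYS setting.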
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL : ${QDRANT_URL}
# The Qdrant API key.
QDRANT_API_KEY : ${QDRANT_API_KEY}
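# Note: when using the bundled `qdrant` service (profile `qdrant`) defined later in this file,
# QDRANT_URL is typically http://qdrant:6333 and this key should match that service's
# QDRANT_API_KEY value (default `difyai123456`).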
# The Qdrant client timeout setting.
QDRANT_CLIENT_TIMEOUT : ${QDRANT_CLIENT_TIMEOUT}
# Whether the Qdrant client uses gRPC mode.
QDRANT_GRPC_ENABLED : ${QDRANT_GRPC_ENABLED}
# The Qdrant server gRPC port.
QDRANT_GRPC_PORT : ${QDRANT_GRPC_PORT}
# Milvus configuration. Only available when VECTOR_STORE is `milvus`.
# The milvus host.
MILVUS_HOST : ${MILVUS_HOST}
# The milvus port.
MILVUS_PORT : ${MILVUS_PORT}
# The milvus username.
MILVUS_USER : ${MILVUS_USER}
# The milvus password.
MILVUS_PASSWORD : ${MILVUS_PASSWORD}
# The milvus tls switch.
MILVUS_SECURE : ${MILVUS_SECURE}
# relyt configurations
RELYT_HOST : ${RELYT_HOST}
RELYT_PORT : ${RELYT_PORT}
RELYT_USER : ${RELYT_USER}
RELYT_PASSWORD : ${RELYT_PASSWORD}
RELYT_DATABASE : ${RELYT_DATABASE}
# pgvector configurations
PGVECTOR_HOST : ${PGVECTOR_HOST}
PGVECTOR_PORT : ${PGVECTOR_PORT}
PGVECTOR_USER : ${PGVECTOR_USER}
PGVECTOR_PASSWORD : ${PGVECTOR_PASSWORD}
PGVECTOR_DATABASE : ${PGVECTOR_DATABASE}
# tidb vector configurations
TIDB_VECTOR_HOST : ${TIDB_VECTOR_HOST}
TIDB_VECTOR_PORT : ${TIDB_VECTOR_PORT}
TIDB_VECTOR_USER : ${TIDB_VECTOR_USER}
TIDB_VECTOR_PASSWORD : ${TIDB_VECTOR_PASSWORD}
TIDB_VECTOR_DATABASE : ${TIDB_VECTOR_DATABASE}
# oracle configurations
ORACLE_HOST : ${ORACLE_HOST}
ORACLE_PORT : ${ORACLE_PORT}
ORACLE_USER : ${ORACLE_USER}
ORACLE_PASSWORD : ${ORACLE_PASSWORD}
ORACLE_DATABASE : ${ORACLE_DATABASE}
# Chroma configuration
CHROMA_HOST : ${CHROMA_HOST}
CHROMA_PORT : ${CHROMA_PORT}
CHROMA_TENANT : ${CHROMA_TENANT}
CHROMA_DATABASE : ${CHROMA_DATABASE}
CHROMA_AUTH_PROVIDER : ${CHROMA_AUTH_PROVIDER}
CHROMA_AUTH_CREDENTIALS : ${CHROMA_AUTH_CREDENTIALS}
# OpenSearch configuration
OPENSEARCH_HOST : ${OPENSEARCH_HOST}
OPENSEARCH_PORT : ${OPENSEARCH_PORT}
OPENSEARCH_USER : ${OPENSEARCH_USER}
OPENSEARCH_PASSWORD : ${OPENSEARCH_PASSWORD}
OPENSEARCH_SECURE : ${OPENSEARCH_SECURE}
# tencent configurations
TENCENT_VECTOR_DB_URL : ${TENCENT_VECTOR_DB_URL}
TENCENT_VECTOR_DB_API_KEY : ${TENCENT_VECTOR_DB_API_KEY}
TENCENT_VECTOR_DB_TIMEOUT : ${TENCENT_VECTOR_DB_TIMEOUT}
TENCENT_VECTOR_DB_USERNAME : ${TENCENT_VECTOR_DB_USERNAME}
TENCENT_VECTOR_DB_DATABASE : ${TENCENT_VECTOR_DB_DATABASE}
TENCENT_VECTOR_DB_SHARD : ${TENCENT_VECTOR_DB_SHARD}
TENCENT_VECTOR_DB_REPLICAS : ${TENCENT_VECTOR_DB_REPLICAS}
# Knowledge Configuration
# Upload file size limit, default 15M.
UPLOAD_FILE_SIZE_LIMIT : ${UPLOAD_FILE_SIZE_LIMIT}
# The maximum number of files that can be uploaded at a time, default 5.
UPLOAD_FILE_BATCH_LIMIT : ${UPLOAD_FILE_BATCH_LIMIT}
# The file extraction scheme to use:
# `dify` (Dify's proprietary file extraction scheme) or `Unstructured` (the Unstructured.io file extraction scheme).
ETL_TYPE : ${ETL_TYPE}
# Unstructured API path, needs to be configured when ETL_TYPE is Unstructured.
UNSTRUCTURED_API_URL : ${UNSTRUCTURED_API_URL}
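# Example .env settings for the Unstructured scheme (illustrative; host name and API path are assumptions):
#   ETL_TYPE=Unstructured
#   UNSTRUCTURED_API_URL=http://your-unstructured-host:8000/general/v0/general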
# Multi-modal Configuration
# The format in which images are sent to multi-modal models. The default is `base64`; `url` is also supported.
MULTIMODAL_SEND_IMAGE_FORMAT : ${MULTIMODAL_SEND_IMAGE_FORMAT}
# Upload image file size limit, default 10M.
UPLOAD_IMAGE_FILE_SIZE_LIMIT : ${UPLOAD_IMAGE_FILE_SIZE_LIMIT}
# The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
SENTRY_DSN : ${SENTRY_DSN}
# The sample rate for Sentry events. Default: `1.0`
SENTRY_TRACES_SAMPLE_RATE : ${SENTRY_TRACES_SAMPLE_RATE}
# The sample rate for Sentry profiles. Default: `1.0`
SENTRY_PROFILES_SAMPLE_RATE : ${SENTRY_PROFILES_SAMPLE_RATE}
# Notion import configuration; supported integration types are `public` and `internal`.
NOTION_INTEGRATION_TYPE : ${NOTION_INTEGRATION_TYPE}
NOTION_CLIENT_SECRET : ${NOTION_CLIENT_SECRET}
NOTION_CLIENT_ID : ${NOTION_CLIENT_ID}
NOTION_INTERNAL_SECRET : ${NOTION_INTERNAL_SECRET}
# Mail configuration. Supported values: `resend`, `smtp`.
MAIL_TYPE : ${MAIL_TYPE}
# The default sender ("from") email address, used when no sender is specified.
MAIL_DEFAULT_SEND_FROM : ${MAIL_DEFAULT_SEND_FROM}
SMTP_SERVER : ${SMTP_SERVER}
SMTP_PORT : ${SMTP_PORT:-465}
SMTP_USERNAME : ${SMTP_USERNAME}
SMTP_PASSWORD : ${SMTP_PASSWORD}
SMTP_USE_TLS : ${SMTP_USE_TLS}
SMTP_OPPORTUNISTIC_TLS : ${SMTP_OPPORTUNISTIC_TLS}
# The API key for Resend (https://resend.com).
RESEND_API_KEY : ${RESEND_API_KEY}
RESEND_API_URL : https://api.resend.com
# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH : ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH}
# Other configurations
INVITE_EXPIRY_HOURS : ${INVITE_EXPIRY_HOURS}
CODE_EXECUTION_ENDPOINT : ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
CODE_EXECUTION_API_KEY : ${CODE_EXECUTION_API_KEY:-dify-sandbox}
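# Note: CODE_EXECUTION_API_KEY is the key the API service uses when calling the sandbox,
# so it should match the API_KEY configured for the `sandbox` service below
# (both default to `dify-sandbox`).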
CODE_MAX_NUMBER : ${CODE_MAX_NUMBER:-9223372036854775807}
CODE_MIN_NUMBER : ${CODE_MIN_NUMBER:- -9223372036854775808}
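# (The two defaults above are the signed 64-bit integer bounds: 2^63 - 1 and -2^63.)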
CODE_MAX_STRING_LENGTH : ${CODE_MAX_STRING_LENGTH:-80000}
TEMPLATE_TRANSFORM_MAX_LENGTH : ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
CODE_MAX_STRING_ARRAY_LENGTH : ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
CODE_MAX_OBJECT_ARRAY_LENGTH : ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
CODE_MAX_NUMBER_ARRAY_LENGTH : ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
SSRF_PROXY_HTTP_URL : ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
SSRF_PROXY_HTTPS_URL : ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
services :
# API service
api :
image : langgenius/dify-api:0.6.12-fix1
restart : always
environment :
# Use the shared environment variables.
<< : *shared-api-worker-env
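# (`<<` is the YAML merge key: it expands the `x-shared-env` anchor defined at the top of
# this file into this service's environment; keys declared directly here, such as MODE below,
# are layered on top of that shared set.)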
# Startup mode, 'api' starts the API server.
MODE : api
depends_on :
- db
- redis
volumes :
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
# uncomment to expose dify-api port to host
# ports:
# - "5001:5001"
networks :
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing the queue.
worker :
image : langgenius/dify-api:0.6.12-fix1
restart : always
environment :
# Use the shared environment variables.
<< : *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE : worker
depends_on :
- db
- redis
volumes :
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks :
- ssrf_proxy_network
- default
# Frontend web application.
web :
image : langgenius/dify-web:0.6.12-fix1
restart : always
environment :
CONSOLE_API_URL : ${CONSOLE_API_URL:-}
APP_API_URL : ${APP_API_URL:-}
SENTRY_DSN : ${SENTRY_DSN:-}
# uncomment to expose dify-web port to host
# ports:
# - "3000:3000"
# The postgres database.
db :
image : postgres:15-alpine
restart : always
environment :
PGUSER : ${PGUSER:-postgres}
POSTGRES_PASSWORD : ${POSTGRES_PASSWORD:-difyai123456}
POSTGRES_DB : ${POSTGRES_DB:-dify}
PGDATA : ${PGDATA:-/var/lib/postgresql/data/pgdata}
volumes :
- ./volumes/db/data:/var/lib/postgresql/data
# uncomment to expose db(postgresql) port to host
# ports:
# - "5432:5432"
healthcheck :
test : [ "CMD" , "pg_isready" ]
interval : 1s
timeout : 3s
retries : 30
# The redis cache.
redis :
image : redis:6-alpine
restart : always
volumes :
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the redis password when starting the redis server.
command : redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck :
test : [ "CMD" , "redis-cli" , "ping" ]
# uncomment to expose redis port to host
# ports:
# - "6379:6379"
# The DifySandbox
sandbox :
image : langgenius/dify-sandbox:0.2.1
restart : always
environment :
# The DifySandbox configurations
# Make sure to change this key to a strong one for your deployment.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY : ${API_KEY:-dify-sandbox}
GIN_MODE : ${GIN_MODE:-release}
WORKER_TIMEOUT : ${WORKER_TIMEOUT:-15}
ENABLE_NETWORK : ${ENABLE_NETWORK:-true}
HTTP_PROXY : ${HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY : ${HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT : ${SANDBOX_PORT:-8194}
volumes :
- ./volumes/sandbox/dependencies:/dependencies
networks :
- ssrf_proxy_network
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed
ssrf_proxy :
image : ubuntu/squid:latest
restart : always
volumes :
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint.sh
entrypoint : [ "sh" , "-c" , "chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
environment :
# Please modify the squid environment variables below to fit your network environment.
HTTP_PORT : ${HTTP_PORT:-3128}
COREDUMP_DIR : ${COREDUMP_DIR:-/var/spool/squid}
REVERSE_PROXY_PORT : ${REVERSE_PROXY_PORT:-8194}
SANDBOX_HOST : ${SANDBOX_HOST:-sandbox}
SANDBOX_PORT : ${SANDBOX_PORT:-8194}
networks :
- ssrf_proxy_network
- default
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
nginx :
image : nginx:latest
restart : always
volumes :
- ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
- ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
- ./nginx/conf.d:/etc/nginx/conf.d
- ./nginx/docker-entrypoint.sh:/docker-entrypoint.sh
- ./nginx/ssl:/etc/ssl
entrypoint : [ "sh" , "-c" , "chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
environment :
NGINX_SERVER_NAME : ${NGINX_SERVER_NAME:-_}
HTTPS_ENABLED : ${HTTPS_ENABLED:-false}
NGINX_SSL_PORT : ${NGINX_SSL_PORT:-443}
# You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
# and modify the env vars below in .env if HTTPS_ENABLED is true.
NGINX_SSL_CERT_FILENAME : ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME : ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
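# Example .env snippet for enabling HTTPS (illustrative; assumes certificate files with these
# names have been placed in ./nginx/ssl):
#   HTTPS_ENABLED=true
#   NGINX_SSL_CERT_FILENAME=dify.crt
#   NGINX_SSL_CERT_KEY_FILENAME=dify.key
#   NGINX_SSL_PORT=443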
NGINX_SSL_PROTOCOLS : ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
NGINX_WORKER_PROCESSES : ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE : ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
NGINX_KEEPALIVE_TIMEOUT : ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT : ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT : ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
depends_on :
- api
- web
ports :
- "${NGINX_PORT:-80}:80"
- "${NGINX_SSL_PORT:-443}:443"
# The Weaviate vector store.
weaviate :
image : semitechnologies/weaviate:1.19.0
profiles :
- weaviate
restart : always
volumes :
# Mount the Weaviate data directory to the container.
- ./volumes/weaviate:/var/lib/weaviate
environment :
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH : ${PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT : ${QUERY_DEFAULTS_LIMIT:-25}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED : ${AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
DEFAULT_VECTORIZER_MODULE : ${DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME : ${CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED : ${AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS : ${AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS : ${AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED : ${AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS : ${AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
# expose weaviate port to host
ports :
- "8080:8080"
# Qdrant vector store.
# (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
qdrant :
image : langgenius/qdrant:v1.7.3
profiles :
- qdrant
restart : always
volumes :
- ./volumes/qdrant:/qdrant/storage
environment :
QDRANT_API_KEY : ${QDRANT_API_KEY:-difyai123456}
# expose qdrant port to host
ports :
- "6333:6333"
- "6334:6334"
# The pgvector vector database.
pgvector :
image : pgvector/pgvector:pg16
profiles :
- pgvector
restart : always
environment :
PGUSER : ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD : ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB : ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA : ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
volumes :
- ./volumes/pgvector/data:/var/lib/postgresql/data
# expose db(postgresql) port to host
ports :
- "5433:5432"
healthcheck :
test : [ "CMD" , "pg_isready" ]
interval : 1s
timeout : 3s
retries : 30
# pgvecto-rs vector store
pgvecto-rs :
image : tensorchord/pgvecto-rs:pg16-v0.2.0
profiles :
- pgvecto-rs
restart : always
environment :
PGUSER : ${PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD : ${POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB : ${POSTGRES_DB:-dify}
# postgres data directory
PGDATA : ${PGDATA:-/var/lib/postgresql/data/pgdata}
volumes :
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
# expose db(postgresql) port to host
ports :
- "5431:5432"
healthcheck :
test : [ "CMD" , "pg_isready" ]
interval : 1s
timeout : 3s
retries : 30
# Chroma vector database
chroma :
image : ghcr.io/chroma-core/chroma:0.5.1
profiles :
- chroma
restart : always
volumes :
- ./volumes/chroma:/chroma/chroma
environment :
CHROMA_SERVER_AUTHN_CREDENTIALS : ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER : ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
IS_PERSISTENT : ${IS_PERSISTENT:-TRUE}
ports :
- "8000:8000"
oracle :
image : container-registry.oracle.com/database/free:latest
profiles :
- oracle
restart : always
ports :
- "1521:1521"
volumes :
- type : volume
source : oradata
target : /opt/oracle/oradata
- ./startupscripts:/opt/oracle/scripts/startup
environment :
- ORACLE_PWD=${ORACLE_PWD:-Dify123456}
- ORACLE_CHARACTERSET=${ORACLE_CHARACTERSET:-AL32UTF8}
# Milvus vector database services
etcd :
container_name : milvus-etcd
image : quay.io/coreos/etcd:v3.5.5
profiles :
- milvus
environment :
- ETCD_AUTO_COMPACTION_MODE=${ETCD_AUTO_COMPACTION_MODE:-revision}
- ETCD_AUTO_COMPACTION_RETENTION=${ETCD_AUTO_COMPACTION_RETENTION:-1000}
- ETCD_QUOTA_BACKEND_BYTES=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
- ETCD_SNAPSHOT_COUNT=${ETCD_SNAPSHOT_COUNT:-50000}
volumes :
- ./volumes/milvus/etcd:/etcd
command : etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck :
test : [ "CMD" , "etcdctl" , "endpoint" , "health" ]
interval : 30s
timeout : 20s
retries : 3
networks :
- milvus
minio :
container_name : milvus-minio
image : minio/minio:RELEASE.2023-03-20T20-16-18Z
profiles :
- milvus
environment :
MINIO_ACCESS_KEY : ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY : ${MINIO_SECRET_KEY:-minioadmin}
ports :
- "9001:9001"
- "9000:9000"
volumes :
- ./volumes/milvus/minio:/minio_data
command : minio server /minio_data --console-address ":9001"
healthcheck :
test : [ "CMD" , "curl" , "-f" , "http://localhost:9000/minio/health/live" ]
interval : 30s
timeout : 20s
retries : 3
networks :
- milvus
milvus-standalone :
container_name : milvus-standalone
image : milvusdb/milvus:v2.3.1
profiles :
- milvus
command : [ "milvus" , "run" , "standalone" ]
environment :
ETCD_ENDPOINTS : ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS : ${MINIO_ADDRESS:-minio:9000}
common.security.authorizationEnabled : ${MILVUS_AUTHORIZATION_ENABLED:-true}
volumes :
- ./volumes/milvus/milvus:/var/lib/milvus
healthcheck :
test : [ "CMD" , "curl" , "-f" , "http://localhost:9091/healthz" ]
interval : 30s
start_period : 90s
timeout : 20s
retries : 3
ports :
- "19530:19530"
- "9091:9091"
depends_on :
- "etcd"
- "minio"
networks :
- milvus
opensearch :
container_name : opensearch
image : opensearchproject/opensearch:latest
profiles :
- opensearch
environment :
- discovery.type=${OPENSEARCH_DISCOVERY_TYPE:-single-node}
- bootstrap.memory_lock=${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
- OPENSEARCH_JAVA_OPTS=-Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
- OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
ulimits :
memlock :
soft : ${OPENSEARCH_MEMLOCK_SOFT:--1}
hard : ${OPENSEARCH_MEMLOCK_HARD:--1}
nofile :
soft : ${OPENSEARCH_NOFILE_SOFT:-65536}
hard : ${OPENSEARCH_NOFILE_HARD:-65536}
volumes :
- ./volumes/opensearch/data:/usr/share/opensearch/data
ports :
- "9200:9200"
- "9600:9600"
networks :
- opensearch-net
opensearch-dashboards :
container_name : opensearch-dashboards
image : opensearchproject/opensearch-dashboards:latest
profiles :
- opensearch
ports :
- "5601:5601"
expose :
- "5601"
environment :
OPENSEARCH_HOSTS : '["https://opensearch:9200"]'
volumes :
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
networks :
- opensearch-net
depends_on :
- opensearch
networks :
# create an internal network between sandbox, api and ssrf_proxy; it cannot access the outside network.
ssrf_proxy_network :
driver : bridge
internal : true
milvus :
driver : bridge
opensearch-net :
driver : bridge
internal : true
volumes :
oradata :