Skip to content
Open
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
2bb2857
docker start
Sep 22, 2025
f4bbfb6
docker start
Sep 22, 2025
f0f0790
update config
Sep 22, 2025
45c5703
代码检测
Sep 22, 2025
4a13c1e
test_start_api
Sep 22, 2025
c337ef3
test_start_api
Sep 22, 2025
c0bd774
test_start_api
Sep 23, 2025
05f485f
fix docker start
Sep 23, 2025
7c77883
update start_api
Sep 23, 2025
3dd8e2e
update start_api
Sep 23, 2025
f7d29f8
update start_api
Sep 23, 2025
c817aac
代码检测
Sep 23, 2025
3206a15
update start_api
Oct 4, 2025
b712065
Merge branch 'MemTensor:dev' into dev
pursues Oct 9, 2025
28310ff
Merge branch 'MemTensor:dev' into dev
pursues Dec 15, 2025
25613d2
Merge branch 'MemTensor:dev' into dev
pursues Dec 15, 2025
d520167
Merge branch 'MemTensor:dev' into dev
pursues Dec 16, 2025
bf5b954
update .env.example
pursues Dec 23, 2025
e6a5a32
Merge branch 'MemTensor:dev' into dev
pursues Dec 23, 2025
2e07a28
Merge branch 'dev' of github.com:pursues/MemOS into dev
pursues Dec 23, 2025
0ed79f9
back start_api
pursues Dec 23, 2025
51334f7
update
pursues Dec 23, 2025
4a575b9
update Dockerfile
pursues Dec 24, 2025
496b2e0
Merge branch 'MemTensor:dev' into dev
pursues Dec 24, 2025
67675fa
merge
pursues Dec 24, 2025
063e2a4
update requirements
pursues Dec 24, 2025
7ccfadb
Merge branch 'MemTensor:dev' into dev
pursues Dec 24, 2025
1cfb82a
back Dockerfile
pursues Dec 24, 2025
1e011f9
Merge branch 'dev' of github.com:pursues/MemOS into dev
pursues Dec 24, 2025
c71c4d6
upadte Dockerfile
pursues Dec 24, 2025
6e9cebf
Merge branch 'dev' of github.com:pursues/MemOS into dev
pursues Dec 24, 2025
658fcce
add
pursues Dec 24, 2025
cf33bee
Merge branch 'MemTensor:dev' into dev
pursues Dec 25, 2025
d924f97
update env.example
pursues Dec 25, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
148 changes: 86 additions & 62 deletions docker/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -3,32 +3,31 @@

## Base
TZ=Asia/Shanghai
ENV_NAME=PLAYGROUND_OFFLINE # Tag shown in DingTalk notifications (e.g., PROD_ONLINE/TEST); no runtime effect unless ENABLE_DINGDING_BOT=true
MOS_CUBE_PATH=/tmp/data_test # local data path
MEMOS_BASE_PATH=. # CLI/SDK cache path
MOS_ENABLE_DEFAULT_CUBE_CONFIG=true # enable default cube config
MOS_ENABLE_REORGANIZE=false # enable memory reorg
# MOS Text Memory Type
MOS_TEXT_MEM_TYPE=general_text # general_text | tree_text
ASYNC_MODE=sync # async/sync, used in default cube config

## User/session defaults
MOS_USER_ID=root
MOS_SESSION_ID=default_session
MOS_MAX_TURNS_WINDOW=20
# Top-K for LLM in the Product API(old version)
MOS_TOP_K=50

## Chat LLM (main dialogue)
# LLM model name for the Product API
MOS_CHAT_MODEL=gpt-4o-mini
# Temperature for LLM in the Product API
MOS_CHAT_TEMPERATURE=0.8
# Max tokens for LLM in the Product API
MOS_MAX_TOKENS=2048
# Top-P for LLM in the Product API
MOS_TOP_P=0.9
# LLM for the Product API backend
MOS_CHAT_MODEL_PROVIDER=openai # openai | huggingface | vllm
MOS_MODEL_SCHEMA=memos.configs.llm.VLLMLLMConfig # vllm only: config class path; keep default unless you extend it
OPENAI_API_KEY=sk-xxx # [required] when provider=openai
OPENAI_API_BASE=https://api.openai.com/v1 # [required] base for the key
OPENAI_BASE_URL= # compatibility for eval/scheduler
VLLM_API_KEY= # required when provider=vllm
VLLM_API_BASE=http://localhost:8088/v1 # required when provider=vllm

## MemReader / retrieval LLM
MEMRADER_MODEL=gpt-4o-mini
Expand All @@ -37,40 +36,60 @@ MEMRADER_API_BASE=http://localhost:3000/v1 # [required] base for the key
MEMRADER_MAX_TOKENS=5000

## Embedding & rerank
# embedding dim
EMBEDDING_DIMENSION=1024
# set default embedding backend
MOS_EMBEDDER_BACKEND=universal_api # universal_api | ollama
# set openai style
MOS_EMBEDDER_PROVIDER=openai # required when universal_api
# embedding model
MOS_EMBEDDER_MODEL=bge-m3 # siliconflow → use BAAI/bge-m3
# embedding url
MOS_EMBEDDER_API_BASE=http://localhost:8000/v1 # required when universal_api
# embedding model key
MOS_EMBEDDER_API_KEY=EMPTY # required when universal_api
OLLAMA_API_BASE=http://localhost:11434 # required when backend=ollama
# reranker config
MOS_RERANKER_BACKEND=http_bge # http_bge | http_bge_strategy | cosine_local
# reranker url
MOS_RERANKER_URL=http://localhost:8001 # required when backend=http_bge*
# reranker model
MOS_RERANKER_MODEL=bge-reranker-v2-m3 # siliconflow → use BAAI/bge-reranker-v2-m3
MOS_RERANKER_HEADERS_EXTRA= # extra headers, JSON string, e.g. {"Authorization":"Bearer your_token"}
# use source
MOS_RERANKER_STRATEGY=single_turn
MOS_RERANK_SOURCE= # optional rerank scope, e.g., history/stream/custom


# External Services (for evaluation scripts)
# API key for reproducing Zep (competitor product) evaluation
ZEP_API_KEY=your_zep_api_key_here
# API key for reproducing Mem0 (competitor product) evaluation
MEM0_API_KEY=your_mem0_api_key_here
# API key for reproducing MemU (competitor product) evaluation
MEMU_API_KEY=your_memu_api_key_here
# API key for reproducing MEMOBASE (competitor product) evaluation
MEMOBASE_API_KEY=your_memobase_api_key_here
# Project URL for reproducing MEMOBASE (competitor product) evaluation
MEMOBASE_PROJECT_URL=your_memobase_project_url_here
# LLM for evaluation
MODEL=gpt-4o-mini
# embedding model for evaluation
EMBEDDING_MODEL=nomic-embed-text:latest

## Internet search & preference memory
# Enable web search
ENABLE_INTERNET=false
# API key for BOCHA Search
BOCHA_API_KEY= # required if ENABLE_INTERNET=true
XINYU_API_KEY=
XINYU_SEARCH_ENGINE_ID=
# default search mode
SEARCH_MODE=fast # fast | fine | mixture
FAST_GRAPH=false
BM25_CALL=false
VEC_COT_CALL=false
# Fine (slow) retrieval strategy; 'rewrite' rewrites the search query before retrieval
FINE_STRATEGY=rewrite # rewrite | recreate | deep_search
ENABLE_ACTIVATION_MEMORY=false
# Whether to enable preference memory
ENABLE_PREFERENCE_MEMORY=true
# Preference Memory Add Mode
PREFERENCE_ADDER_MODE=fast # fast | safe
# Whether to deduplicate explicit preferences based on factual memory
DEDUP_PREF_EXP_BY_TEXTUAL=false

## Reader chunking
Expand All @@ -81,110 +100,115 @@ MEM_READER_CHAT_CHUNK_SESS_SIZE=10 # sessions per chunk (default mode)
MEM_READER_CHAT_CHUNK_OVERLAP=2 # overlap between chunks

## Scheduler (MemScheduler / API)
# Enable or disable the main switch for configuring the memory scheduler during MemOS class initialization
MOS_ENABLE_SCHEDULER=false
# Determines the number of most relevant memory entries the scheduler retrieves or processes at runtime (e.g., when reordering or updating working memory)
MOS_SCHEDULER_TOP_K=10
# The time interval (in seconds) for updating "Activation Memory" (usually referring to caching or short-term memory mechanisms)
MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL=300
# The size of the context window considered by the scheduler when processing tasks (such as the number of recent messages or conversation rounds)
MOS_SCHEDULER_CONTEXT_WINDOW_SIZE=5
# The maximum number of working threads allowed in the scheduler thread pool for concurrent task execution
MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS=10000
# The polling interval (in seconds) for the scheduler to consume new messages/tasks from the queue. The smaller the value, the faster the response, but the CPU usage may be higher
MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS=0.01
# Whether to enable the parallel distribution function of the scheduler to improve the throughput of concurrent operations
MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH=true
# The specific switch to enable or disable the "Activate Memory" function in the scheduler logic
MOS_SCHEDULER_ENABLE_ACTIVATION_MEMORY=false
# Control whether the scheduler instance is actually started during server initialization. If false, the scheduler object may be created but its background loop will not be started
API_SCHEDULER_ON=true
# Defines the window size for API search operations in OptimizedScheduler. It is passed to the SchedulerAPIModule to control the scope of the search context
API_SEARCH_WINDOW_SIZE=5
# Specify how many rounds of previous conversations (history) to retrieve and consider during the 'hybrid search' (fast search+asynchronous fine search). This helps provide context aware search results
API_SEARCH_HISTORY_TURNS=5

## Graph / vector stores
# Neo4j database selection mode
NEO4J_BACKEND=neo4j-community # neo4j-community | neo4j | nebular | polardb
# Neo4j database url
NEO4J_URI=bolt://localhost:7687 # required when backend=neo4j*
# Neo4j database user
NEO4J_USER=neo4j # required when backend=neo4j*
# Neo4j database password
NEO4J_PASSWORD=12345678 # required when backend=neo4j*
# Neo4j database name
NEO4J_DB_NAME=neo4j # required for shared-db mode
# Neo4j database data sharing with Memos
MOS_NEO4J_SHARED_DB=false
QDRANT_HOST=localhost
QDRANT_PORT=6333
# For Qdrant Cloud / remote endpoint (takes priority if set):
QDRANT_URL=your_qdrant_url
QDRANT_API_KEY=your_qdrant_key
# milvus server uri
MILVUS_URI=http://localhost:19530 # required when ENABLE_PREFERENCE_MEMORY=true
MILVUS_USER_NAME=root # same as above
MILVUS_PASSWORD=12345678 # same as above
NEBULAR_HOSTS=["localhost"]
NEBULAR_USER=root
NEBULAR_PASSWORD=xxxxxx
NEBULAR_SPACE=shared-tree-textual-memory
NEBULAR_WORKING_MEMORY=20
NEBULAR_LONGTERM_MEMORY=1000000
NEBULAR_USER_MEMORY=1000000

## Relational DB (user manager / PolarDB)
MOS_USER_MANAGER_BACKEND=sqlite # sqlite | mysql
MYSQL_HOST=localhost # required when backend=mysql
MYSQL_PORT=3306
MYSQL_USERNAME=root
MYSQL_PASSWORD=12345678
MYSQL_DATABASE=memos_users
MYSQL_CHARSET=utf8mb4

# PolarDB endpoint/host
POLAR_DB_HOST=localhost
# PolarDB port
POLAR_DB_PORT=5432
# PolarDB username
POLAR_DB_USER=root
# PolarDB password
POLAR_DB_PASSWORD=123456
# PolarDB database name
POLAR_DB_DB_NAME=shared_memos_db
# PolarDB Server Mode:
# If set to true, use Multi-Database Mode where each user has their own independent database (physical isolation).
# If set to false (default), use Shared Database Mode where all users share one database with logical isolation via username.
POLAR_DB_USE_MULTI_DB=false
# PolarDB connection pool size
POLARDB_POOL_MAX_CONN=100

## Redis (scheduler queue) — fill only if you want scheduler queues in Redis; otherwise in-memory queue is used
REDIS_HOST=localhost # global Redis endpoint (preferred over MEMSCHEDULER_*)
REDIS_PORT=6379
REDIS_DB=0
REDIS_PASSWORD=
REDIS_SOCKET_TIMEOUT=
REDIS_SOCKET_CONNECT_TIMEOUT=
## Related configurations of Redis
# Redis message queue used to send scheduling messages and to synchronize some variables
MEMSCHEDULER_REDIS_HOST= # fallback keys if not using the global ones
MEMSCHEDULER_REDIS_PORT=
MEMSCHEDULER_REDIS_DB=
MEMSCHEDULER_REDIS_PASSWORD=
MEMSCHEDULER_REDIS_TIMEOUT=
MEMSCHEDULER_REDIS_CONNECT_TIMEOUT=

## MemScheduler LLM
MEMSCHEDULER_OPENAI_API_KEY= # LLM key for scheduler’s own calls (OpenAI-compatible); leave empty if scheduler not using LLM
MEMSCHEDULER_OPENAI_BASE_URL= # Base URL for the above; can reuse OPENAI_API_BASE
MEMSCHEDULER_OPENAI_DEFAULT_MODEL=gpt-4o-mini

## Nacos (optional config center)
# Enable/disable Nacos long-polling config watch (defaults to true)
NACOS_ENABLE_WATCH=false
# Long-polling watch interval in seconds (default 30; may be left unconfigured to use the default)
NACOS_WATCH_INTERVAL=60
# nacos server address
NACOS_SERVER_ADDR=
# nacos dataid
NACOS_DATA_ID=
# nacos group
NACOS_GROUP=DEFAULT_GROUP
# nacos namespace
NACOS_NAMESPACE=
# nacos ak
AK=
# nacos sk
SK=

## DingTalk bot & OSS upload
ENABLE_DINGDING_BOT=false # set true -> fields below required
DINGDING_ACCESS_TOKEN_USER=
DINGDING_SECRET_USER=
DINGDING_ACCESS_TOKEN_ERROR=
DINGDING_SECRET_ERROR=
DINGDING_ROBOT_CODE=
DINGDING_APP_KEY=
DINGDING_APP_SECRET=
OSS_ENDPOINT= # bot image upload depends on OSS
OSS_REGION=
OSS_BUCKET_NAME=
OSS_ACCESS_KEY_ID=
OSS_ACCESS_KEY_SECRET=
OSS_PUBLIC_BASE_URL=

## SDK / external client
MEMOS_API_KEY=
MEMOS_BASE_URL=https://memos.memtensor.cn/api/openmem/v1

# chat model for chat api
CHAT_MODEL_LIST='[{
"backend": "deepseek",
"api_base": "http://localhost:1234",
"api_key": "your-api-key",
"model_name_or_path": "deepseek-r1",
"support_models": ["deepseek-r1"]
}]'

# RabbitMQ host name for message-log pipeline
MEMSCHEDULER_RABBITMQ_HOST_NAME=
# RabbitMQ user name for message-log pipeline
MEMSCHEDULER_RABBITMQ_USER_NAME=
# RabbitMQ password for message-log pipeline
MEMSCHEDULER_RABBITMQ_PASSWORD=
# RabbitMQ virtual host for message-log pipeline
MEMSCHEDULER_RABBITMQ_VIRTUAL_HOST=memos
# Erase connection state on connect for message-log pipeline
MEMSCHEDULER_RABBITMQ_ERASE_ON_CONNECT=true
# RabbitMQ port for message-log pipeline
MEMSCHEDULER_RABBITMQ_PORT=5672