# docker-compose.yaml — runs a tailscaled sidecar and an Ollama container that
# shares its network namespace, so Ollama is reachable over the tailnet.
# (The obsolete `version:` top-level key is intentionally omitted — Compose v2
# ignores it and prints a warning.)

services:
  tailscaled:
    container_name: tailscaled
    image: tailscale/tailscale:unstable-v1.77.41
    privileged: true  # needs access to the TUN device
    restart: unless-stopped
    cap_add:
      # Capability names are conventionally uppercase.
      - NET_ADMIN
      - SYS_MODULE
    devices:
      # TUN device is exposed here; no separate bind mount in `volumes` needed.
      - /dev/net/tun:/dev/net/tun
    volumes:
      - ./lib/:/var/lib/tailscale  # persist tailscaled state across restarts
      - /var/run/dbus:/var/run/dbus
      - /var/run/tailscale:/var/run/tailscale
      - /tmp:/tmp
    environment:
      # SECURITY: never commit a real auth key to version control. Inject it
      # from the host environment or an .env file at deploy time. The key that
      # was previously hard-coded here is compromised and must be rotated in
      # Headscale.
      - TS_AUTHKEY=${TS_AUTHKEY:?set TS_AUTHKEY in the environment or .env}
      - TS_STATE_DIR=/var/lib/tailscale  # state directory (matches mount above)
      - TS_USERSPACE=false  # use the kernel TUN device instead of userspace mode
      - "TS_EXTRA_ARGS=--login-server=https://headscale.jmsu.top --advertise-tags=tag:container --reset"

  ollama:
    container_name: ollama
    pull_policy: always
    restart: unless-stopped
    image: ollama/ollama:latest
    network_mode: service:tailscaled  # share tailscaled's network namespace
    environment:
      - OLLAMA_MODELS=/usr/share/ollama/.ollama/models  # model storage path
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      # NOTE: in list-form `environment`, quotes inside the scalar become part
      # of the value (the old `OLLAMA_ORIGINS="*"` literally included the
      # double quotes), so the values below are deliberately unquoted.
      - OLLAMA_ORIGINS=*
      - OLLAMA_HOST=0.0.0.0
    volumes:
      # Same named volume mounted at both candidate model locations so models
      # persist regardless of which path this image version resolves.
      - ollama-data:/root/.ollama
      - ollama-data:/usr/share/ollama/.ollama
    deploy:
      resources:
        reservations:
          devices:
            # NVIDIA GPU reservation (requires the NVIDIA container toolkit).
            - driver: nvidia
              count: all
              capabilities: [gpu]

volumes:
  ollama-data:
    driver: local