
Deploying the Dockerized AI stack OPEN-WEBUI + OLLAMA + NGINX

One-click deployment of OPEN-WEBUI, OLLAMA and NGINX: an OpenAI-style chat-bot stack. In the compose file below the Ollama service is kept but commented out; vLLM serves the model through an OpenAI-compatible API instead. Requests flow from the browser to nginx (port 81), on to open-webui (port 8080), and finally to vllm (port 8000).
docker-compose.yaml
services:

  # ollama:
  #   deploy:
  #     resources:
  #       reservations:
  #         devices:
  #           - driver: nvidia
  #             count: all
  #             capabilities:
  #               - gpu  # use GPU acceleration
  #   volumes:
  #     - ollama-volume:/root/.ollama # keep Ollama's data on the host
  #     - /etc/localtime:/etc/localtime:ro
  #   container_name: ollama
  #   image: ollama/ollama
  #   restart: unless-stopped
  #   networks:
  #     - isolated # Docker-internal isolated network
  #     - internet

  vllm:
    container_name: vllm
    image: vllm/vllm-openai:latest
    # ipc: host
    volumes:
      - ${HUGGINGFACE_MODELS_DIR}:/models
      - /etc/localtime:/etc/localtime:ro
    command: >
      --model /models/models--unsloth--llama-3-8b-Instruct-lawdata
      --served-model-name llama-3-8b-Instruct-lawdata
      --gpu-memory-utilization 0.90
      --max-model-len 1072
      --quantization bitsandbytes
      --load-format bitsandbytes
    ports:
      - "8000:8000"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - isolated # Docker-internal isolated network

  # https://github.com/open-webui/open-webui
  open-webui: # globally unique service name
    volumes:
      - open-webui-volume:/app/backend/data # keep open-webui's data on the host
      - /etc/localtime:/etc/localtime:ro
    container_name: open-webui
    restart: unless-stopped
    image: ghcr.io/open-webui/open-webui:main
    # network_mode: host
    ports:
      - "3000:8080" # the Open WebUI image serves on port 8080 inside the container
    environment:
      # - OLLAMA_BASE_URL=http://ollama:11434 # how open-webui would reach Ollama; the service name stands in for the IP
      - ENABLE_OLLAMA_API=False
      - OPENAI_API_BASE_URL=http://vllm:8000/v1
      - LOG_LEVEL=DEBUG
    depends_on:
      # - ollama
      - vllm
    networks:
      - isolated

  nginx-webui:
    volumes:
      - ${NGINX_DATA_DIR}/html:/usr/share/nginx/html:ro
      - ${NGINX_DATA_DIR}/conf/nginx.conf:/etc/nginx/nginx.conf:ro
      - ${NGINX_DATA_DIR}/conf/conf.d/default.conf:/etc/nginx/conf.d/default.conf:ro
      - ${NGINX_DATA_DIR}/conf/.htpasswd:/etc/nginx/.htpasswd:ro
      - /etc/localtime:/etc/localtime:ro
      - ${NGINX_DATA_DIR}/log/access.log:/var/log/nginx/access.log
      - ${NGINX_DATA_DIR}/log/error.log:/var/log/nginx/error.log
    container_name: nginx-webui
    ports:
      - "81:81"
    image: nginx:latest
    #image: quay.io/ricardbejarano/nginx
    depends_on:
      - open-webui
    restart: unless-stopped
    networks:
      - isolated
      - internet

volumes:
  ollama-volume:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ${OLLAMA_DATA_DIR}
  open-webui-volume:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ${OPEN_WEBUI_DATA_DIR}

networks:
  isolated:
    driver: bridge
    internal: true
  internet:
    driver: bridge
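
Once the stack is up, it is worth sanity-checking vLLM's OpenAI-compatible API before touching the UI. A minimal sketch: because vllm only joins the internal isolated network, the published port 8000 may not be reachable from the host, so the curl runs from the open-webui container on the same network (assuming curl is available in that image):

# list the served models (should include llama-3-8b-Instruct-lawdata)
docker compose -f configs/docker-compose.yaml exec open-webui \
  curl -s http://vllm:8000/v1/models

# one-shot chat completion against the OpenAI-compatible endpoint
docker compose -f configs/docker-compose.yaml exec open-webui \
  curl -s http://vllm:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"model": "llama-3-8b-Instruct-lawdata", "messages": [{"role": "user", "content": "Hello"}]}'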

nginx.conf
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;

events {
    worker_connections  1024;
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    keepalive_timeout  65;

    include /etc/nginx/conf.d/*.conf;  # load the config files under conf.d
}
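
After changing either file, the running container can validate and reload the configuration in place (standard nginx commands, using the service name from the compose file):

docker compose -f configs/docker-compose.yaml exec nginx-webui nginx -t
docker compose -f configs/docker-compose.yaml exec nginx-webui nginx -s reload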

docker/docker-nginx/data/conf/conf.d/default.conf
# server {
#     listen       80;
#     server_name  example.com www.example.com;

#     root   /usr/share/nginx/html;
#     index  index.html index.htm;

#     location / {
#         try_files $uri $uri/ =404;
#     }

#     error_page   500 502 503 504  /50x.html;
#     location = /50x.html {
#         root   /usr/share/nginx/html;
#     }
# }
server {
    listen 81;
    server_name localhost;

    location / {
        proxy_pass http://open-webui:8080;
        # proxy_pass http://localhost:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # proxy WebSocket requests
    location /ws/ {
        proxy_pass http://open-webui:8080;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
}
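
Note that the compose file mounts ${NGINX_DATA_DIR}/conf/.htpasswd into the container, but the server block above never references it. To actually put HTTP basic auth in front of Open WebUI, generate the password file and add the auth_basic directives. A sketch, assuming the htpasswd tool from apache2-utils and an example user named paul:

# create the password file with one user (prompts for the password)
htpasswd -c ${NGINX_DATA_DIR}/conf/.htpasswd paul

# then enable it inside "location / { ... }" in default.conf:
#   auth_basic           "Restricted";
#   auth_basic_user_file /etc/nginx/.htpasswd;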

00_varible.sh
#!/bin/bash

# Get the path of this script
# SCRIPT_PATH="$(realpath "$0")"
# echo "Script path: $SCRIPT_PATH"

# Get the directory this script lives in
# SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
# echo "Script directory: $SCRIPT_DIR"
# cd "$SCRIPT_DIR"

# export HTTP_PROXY=http://192.168.0.102:7890
# export HTTPS_PROXY=https://192.168.0.102:7890


export DOCKER_ROOT_DIR=/home/paul/paulwong/work/workspaces/python-ai-project/docker
export NGINX_DATA_DIR=${DOCKER_ROOT_DIR}/docker-nginx/data
export OLLAMA_DATA_DIR=${DOCKER_ROOT_DIR}/docker-ollama/data
export OPEN_WEBUI_DATA_DIR=${DOCKER_ROOT_DIR}/docker-webui/data
export HUGGINGFACE_MODELS_DIR=/home/paul/.cache/huggingface/models
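
The bind-mounted volumes and the two nginx log mounts all require their host paths to exist before docker compose up; Docker would otherwise fail, or create a directory where a file is expected. A small prep sketch using the same variables:

source ./00_varible.sh
mkdir -p "${NGINX_DATA_DIR}"/{html,conf/conf.d,log}
mkdir -p "${OLLAMA_DATA_DIR}" "${OPEN_WEBUI_DATA_DIR}" "${HUGGINGFACE_MODELS_DIR}"
# the log entries are file mounts, so the files themselves must exist
touch "${NGINX_DATA_DIR}"/log/access.log "${NGINX_DATA_DIR}"/log/error.log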

01_start-nginx-ollama-webui.sh
#!/bin/bash

# Get the path of this script
SCRIPT_PATH="$(realpath "$0")"
echo "Script path: $SCRIPT_PATH"

# Get the directory this script lives in
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
echo "Script directory: $SCRIPT_DIR"
cd "$SCRIPT_DIR"

source ./00_varible.sh
docker compose -f configs/docker-compose.yaml down
docker compose -f configs/docker-compose.yaml up
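
Running up in the foreground ties the stack to the terminal; a common variant is to detach and follow the logs instead (standard compose flags):

docker compose -f configs/docker-compose.yaml up -d
docker compose -f configs/docker-compose.yaml logs -f vllm open-webui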

02_restart-nginx-ollama-webui.sh
#!/bin/bash

# Get the path of this script
SCRIPT_PATH="$(realpath "$0")"
echo "Script path: $SCRIPT_PATH"

# Get the directory this script lives in
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
echo "Script directory: $SCRIPT_DIR"
cd "$SCRIPT_DIR"

source ./00_varible.sh
docker compose -f configs/docker-compose.yaml restart

03_login_ollama.sh
#!/bin/bash

# Get the path of this script
SCRIPT_PATH="$(realpath "$0")"
echo "Script path: $SCRIPT_PATH"

# Get the directory this script lives in
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
echo "Script directory: $SCRIPT_DIR"
cd "$SCRIPT_DIR"

source ./00_varible.sh
docker compose -f configs/docker-compose.yaml exec ollama /bin/bash
# echo ${DOCKER_ROOT_DIR}
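
With the ollama service commented out in docker-compose.yaml, the exec above fails; the equivalent shell into the running vLLM container would be:

docker compose -f configs/docker-compose.yaml exec vllm /bin/bash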

04_restart_open_webui.sh
#!/bin/bash

# Get the path of this script
SCRIPT_PATH="$(realpath "$0")"
echo "Script path: $SCRIPT_PATH"

# Get the directory this script lives in
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
echo "Script directory: $SCRIPT_DIR"
cd "$SCRIPT_DIR"

source ./00_varible.sh
docker compose -f configs/docker-compose.yaml restart open-webui
# echo ${DOCKER_ROOT_DIR}

posted on 2024-06-19 22:23 paulwong (category: AI-LLM)