.github/workflows/mineru-build.yaml  (new file, vendored, +90 lines)
@@ -0,0 +1,90 @@
name: Build and Push Docker Image

on:
  push:
    tags: ['v-mineru-*']

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: https://mac.alexsun.top:3000/actions/checkout@v4

      - name: Extract version from tag
        id: set-version
        run: |
          TAG_NAME=${GITHUB_REF#refs/tags/}
          BUILD_VERSION=${TAG_NAME#v-mineru-}
          echo "BUILD_VERSION=$BUILD_VERSION" >> $GITHUB_ENV
          echo "Extracted version: $BUILD_VERSION"

      - name: Build Docker images
        id: build
        continue-on-error: true
        run: |
          cd mineru
          docker compose build --no-cache
        env:
          BUILD_VERSION: ${{ env.BUILD_VERSION }}

      - name: Push Docker images
        id: push
        if: steps.build.outcome == 'success'
        continue-on-error: true
        run: |
          cd mineru
          docker compose push
        env:
          BUILD_VERSION: ${{ env.BUILD_VERSION }}

      - name: Build and push latest Docker images
        id: push-latest
        if: steps.build.outcome == 'success'
        continue-on-error: true
        run: |
          cd mineru
          docker compose build
          docker compose push
        env:
          BUILD_VERSION: latest

      - name: Send success notification to Feishu
        if: steps.push.outcome == 'success'
        uses: https://mac.alexsun.top:3000/actions/webhook-action@master
        with:
          url: ${{ secrets.FEISHU_WEBHOOK }}
          method: POST
          body: |
            {
              "msg_type": "text",
              "content": {
                "text": "Image ${{ env.REPO_NAME }}:${{ env.BUILD_VERSION }} was built and pushed successfully!"
              }
            }
        env:
          REPO_NAME: ${{ github.event.repository.name }}
          BUILD_VERSION: ${{ env.BUILD_VERSION }}

      - name: Send failure notification to Feishu
        if: steps.build.outcome == 'failure' || steps.push.outcome == 'failure' || steps.push-latest.outcome == 'failure'
        uses: https://mac.alexsun.top:3000/actions/webhook-action@master
        with:
          url: ${{ secrets.FEISHU_WEBHOOK }}
          method: POST
          body: |
            {
              "msg_type": "text",
              "content": {
                "text": "Image ${{ env.REPO_NAME }}:${{ env.BUILD_VERSION }} build or push failed!"
              }
            }
        env:
          REPO_NAME: ${{ github.event.repository.name }}
          BUILD_VERSION: ${{ env.BUILD_VERSION }}

      - name: Fail workflow if build or push failed
        if: steps.build.outcome == 'failure' || steps.push.outcome == 'failure' || steps.push-latest.outcome == 'failure'
        run: |
          echo "Build or push step failed. Failing the workflow."
          exit 1
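For reference, the workflow only fires on tags matching v-mineru-*, and the set-version step strips that prefix to produce BUILD_VERSION. A minimal sketch of cutting a release from a local clone (the tag name v-mineru-2.7.0 is only an illustrative example, not a tag this commit creates):

# Tag the current commit; the 'v-mineru-' prefix is what the push trigger matches
git tag v-mineru-2.7.0
git push origin v-mineru-2.7.0

# The same prefix-stripping the set-version step performs
TAG_NAME="refs/tags/v-mineru-2.7.0"
TAG_NAME=${TAG_NAME#refs/tags/}        # v-mineru-2.7.0
BUILD_VERSION=${TAG_NAME#v-mineru-}    # 2.7.0
echo "$BUILD_VERSION"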
Makefile  (new file, +4 lines)
@@ -0,0 +1,4 @@
.PHONY: update

update:
	curl -o mineru/Dockerfile "https://git.mac.alexsun.top:3000/mirror/MinerU/raw/branch/master/docker/china/Dockerfile"
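The Makefile's only job is to resync the vendored Dockerfile from the mirrored MinerU repository. A usage sketch from the repository root (the git diff step is just an assumed way to review the refresh before committing):

# Refresh mineru/Dockerfile from upstream
make update
# Inspect what changed before committing
git diff mineru/Dockerfile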
mineru/Dockerfile  (new file, +29 lines)
@@ -0,0 +1,29 @@
# Use DaoCloud mirrored vllm image for China region for GPUs with Ampere, Ada Lovelace, or Hopper architecture (8.0 <= Compute Capability <= 9.0)
# Compute Capability lookup: https://developer.nvidia.com/cuda-gpus
# Only supports the x86_64 architecture
FROM docker.m.daocloud.io/vllm/vllm-openai:v0.10.1.1

# Use DaoCloud mirrored vllm image for China region for GPUs with Volta, Turing, or Blackwell architecture (7.0 <= Compute Capability < 8.0 or Compute Capability >= 10.0)
# Supports the x86_64 and ARM (AArch64) architectures
# FROM docker.m.daocloud.io/vllm/vllm-openai:v0.11.0

# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \
    apt-get install -y \
        fonts-noto-core \
        fonts-noto-cjk \
        fontconfig \
        libgl1 && \
    fc-cache -fv && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Install the latest mineru
RUN python3 -m pip install -U 'mineru[core]>=2.7.0' -i https://mirrors.aliyun.com/pypi/simple --break-system-packages && \
    python3 -m pip cache purge

# Download models and update the configuration file
RUN /bin/bash -c "mineru-models-download -s modelscope -m all"

# Set the entry point to export the local model source and exec the given command
ENTRYPOINT ["/bin/bash", "-c", "export MINERU_MODEL_SOURCE=local && exec \"$@\"", "--"]
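The ENTRYPOINT wrapper exports MINERU_MODEL_SOURCE=local and then execs whatever command follows the image name, so the container can run any mineru CLI against the models downloaded at build time. A sketch of building and exercising the image by hand outside of compose (the dev tag and the --gpus flag are local assumptions, not defined by this commit):

# Build the image from the repository root
docker build -t alexsuntop/mineru:dev mineru/
# The arguments after the image name are exec'd by the ENTRYPOINT
docker run --rm --gpus all alexsuntop/mineru:dev mineru --help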
mineru/docker-compose.yaml  (new file, +70 lines)
@@ -0,0 +1,70 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

x-mineru-vllm: &mineru-vllm
  <<: *defaults
  image: ${GLOBAL_REGISTRY:-}alexsuntop/mineru:${BUILD_VERSION:-2.7.0}
  build:
    context: .
    dockerfile: Dockerfile
  environment:
    TZ: ${TZ:-UTC}
    MINERU_MODEL_SOURCE: local
  ulimits:
    memlock: -1
    stack: 67108864
  ipc: host
  deploy:
    resources:
      limits:
        cpus: "16.0"
        memory: 32G
      reservations:
        cpus: "8.0"
        memory: 16G
        devices:
          - driver: nvidia
            device_ids: ["0"]
            capabilities: [gpu]

services:
  mineru-openai-server:
    <<: *mineru-vllm
    ports:
      - ${MINERU_PORT_OVERRIDE_VLLM:-30000}:30000
    entrypoint: mineru-openai-server
    command:
      # ==================== Engine Selection ====================
      # WARNING: Only ONE engine can be enabled at a time!
      # Choose 'vllm' OR 'lmdeploy' (uncomment one line below)
      - --engine vllm
      # --engine lmdeploy

      # ==================== vLLM Engine Parameters ====================
      # Uncomment if using --engine vllm
      - --host 0.0.0.0
      - --port 30000
      # Multi-GPU configuration (increase throughput)
      # --data-parallel-size 2
      # Single GPU memory optimization (reduce if VRAM insufficient)
      # --gpu-memory-utilization 0.5  # Try 0.4 or lower if issues persist

      # ==================== LMDeploy Engine Parameters ====================
      # Uncomment if using --engine lmdeploy
      # --server-name 0.0.0.0
      # --server-port 30000
      # Multi-GPU configuration (increase throughput)
      # --dp 2
      # Single GPU memory optimization (reduce if VRAM insufficient)
      # --cache-max-entry-count 0.5  # Try 0.4 or lower if issues persist
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
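Putting the pieces together, the compose service can be brought up locally and probed the same way the healthcheck does. A sketch, assuming a single NVIDIA GPU exposed as device 0 and the default port mapping:

# Build and start the server; BUILD_VERSION only affects the image tag
cd mineru
BUILD_VERSION=2.7.0 docker compose up -d --build
# The same probe the healthcheck runs, issued from the host
curl -f http://localhost:30000/health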