chore: add missing READMEs

Sun-ZhenXing
2025-11-08 21:57:17 +08:00
parent a65a009640
commit febd1601a2
34 changed files with 1806 additions and 167 deletions


builds/io-paint/.env.example
@@ -0,0 +1,8 @@
# Docker registry
DOCKER_REGISTRY=docker.io
# Build version
BUILD_VERSION=1.6.0
# Hugging Face endpoint (optional; useful for users in China)
# HF_ENDPOINT=https://hf-mirror.com

builds/io-paint/.gitignore

@@ -0,0 +1 @@
/models


builds/io-paint/Dockerfile
@@ -0,0 +1,18 @@
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-runtime
ARG DEBIAN_FRONTEND=noninteractive
ARG VERSION=1.6.0
WORKDIR /workspace
RUN apt-get update && apt-get install -y --no-install-recommends \
    software-properties-common \
    libsm6 libxext6 ffmpeg libfontconfig1 libxrender1 libgl1-mesa-glx \
    curl python3-pip && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
RUN python3 -m pip install --upgrade pip
RUN pip3 install iopaint==${VERSION} && pip3 cache purge
EXPOSE 8080
CMD ["iopaint", "start", "--model=lama", "--device=cuda", "--port=8080", "--host=0.0.0.0"]

builds/io-paint/README.md

@@ -0,0 +1,60 @@
# IOPaint (Lama Cleaner)
[English](./README.md) | [中文](./README.zh.md)
IOPaint (formerly LaMa Cleaner) is a free and open-source inpainting & outpainting tool powered by SOTA AI models.
## Prerequisites
- NVIDIA GPU with CUDA support
- Docker with NVIDIA runtime support
## Initialization
1. Copy the example environment file:
```bash
cp .env.example .env
```
2. Start the service:
```bash
docker compose up -d
```
3. Access the web interface at <http://localhost:8080>
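
Once the container is up (the first start downloads the LaMa model, which can take a while), a quick reachability check that mirrors the compose healthcheck:

```bash
# Expect HTTP 200 once IOPaint has finished loading the model
curl -fsS -o /dev/null -w "%{http_code}\n" http://localhost:8080/
```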
## Services
- `lama-cleaner`: The IOPaint service.
## Configuration
The service runs on port 8080 and uses CUDA device 0 by default.

| Variable          | Description                      | Default     |
| ----------------- | -------------------------------- | ----------- |
| `DOCKER_REGISTRY` | Docker registry to use           | `docker.io` |
| `BUILD_VERSION`   | Build version                    | `1.6.0`     |
| `HF_ENDPOINT`     | Hugging Face endpoint (optional) | -           |
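
For example, a `.env` that routes model downloads through the mirror suggested in `.env.example`:

```bash
# .env
DOCKER_REGISTRY=docker.io
BUILD_VERSION=1.6.0
# Optional Hugging Face mirror for model downloads
HF_ENDPOINT=https://hf-mirror.com
```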
## Models
Models are automatically downloaded and cached in the `./models` directory on first use.
## GPU Support
This configuration requires an NVIDIA GPU and uses CUDA device 0. Make sure you have:
- NVIDIA drivers installed
- Docker with NVIDIA runtime support
- NVIDIA Container Toolkit (successor to the `nvidia-docker2` package) installed
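
A quick way to confirm the GPU is visible to Docker before starting the stack (sketch; the CUDA image tag is only an example, any CUDA-enabled image available locally works):

```bash
# Host-side driver check
nvidia-smi

# Container check through the NVIDIA runtime
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
```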
## Reference
- [Dockerfile](https://github.com/Sanster/IOPaint/blob/main/docker/GPUDockerfile)
## License
Please refer to the official IOPaint project for license information.


builds/io-paint/README.zh.md
@@ -0,0 +1,54 @@
# IOPaint (Lama Cleaner)
[English](./README.md) | [中文](./README.zh.md)
IOPaint (formerly LaMa Cleaner) is a free and open-source inpainting & outpainting tool powered by SOTA AI models.
## Prerequisites
- NVIDIA GPU with CUDA support
- Docker with NVIDIA runtime support
## Initialization
1. Copy the example environment file:
```bash
cp .env.example .env
```
2. Start the service:
```bash
docker compose up -d
```
3. Access the web interface at <http://localhost:8080>
## Services
- `lama-cleaner`: The IOPaint service.
## Configuration
The service runs on port 8080 and uses CUDA device 0 by default.

| Variable          | Description                      | Default     |
| ----------------- | -------------------------------- | ----------- |
| `DOCKER_REGISTRY` | Docker registry to use           | `docker.io` |
| `BUILD_VERSION`   | Build version                    | `1.6.0`     |
| `HF_ENDPOINT`     | Hugging Face endpoint (optional) | -           |
## Models
Models are automatically downloaded and cached in the `./models` directory on first use.
## GPU Support
This configuration requires an NVIDIA GPU and uses CUDA device 0. Make sure you have:
- NVIDIA drivers installed
- Docker with NVIDIA runtime support
- NVIDIA Container Toolkit (successor to the `nvidia-docker2` package) installed
## License
Please refer to the official IOPaint project for license information.


@@ -0,0 +1,47 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  lama-cleaner:
    <<: *defaults
    image: ${DOCKER_REGISTRY:-docker.io}/local/lama-cleaner:${BUILD_VERSION:-1.6.0}
    ports:
      - 8080:8080
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      TZ: ${TZ:-UTC}
      HF_ENDPOINT: ${HF_ENDPOINT:-}
    volumes:
      - ./models:/root/.cache
    command:
      - iopaint
      - start
      - --model=lama
      - --device=cuda
      - --port=8080
      - --host=0.0.0.0
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '1.0'
          memory: 2G
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [compute, utility]
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s


@@ -0,0 +1,7 @@
# MinerU Docker image
MINERU_DOCKER_IMAGE=alexsuntop/mineru:2.5.4
# Port configurations
MINERU_PORT_OVERRIDE_VLLM=30000
MINERU_PORT_OVERRIDE_API=8000
MINERU_PORT_OVERRIDE_GRADIO=7860


@@ -0,0 +1,27 @@
# Use the official vllm image for GPUs with Ampere architecture or newer (Compute Capability >= 8.0)
# Compute Capability version query (https://developer.nvidia.com/cuda-gpus)
FROM vllm/vllm-openai:v0.10.1.1
# Use the official vllm image for GPUs with Turing architecture or older (Compute Capability < 8.0)
# FROM vllm/vllm-openai:v0.10.2
# Install libgl for opencv support & Noto fonts for Chinese characters
RUN apt-get update && \
apt-get install -y \
fonts-noto-core \
fonts-noto-cjk \
fontconfig \
libgl1 && \
fc-cache -fv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install the latest mineru release
RUN python3 -m pip install -U 'mineru[core]' --break-system-packages && \
python3 -m pip cache purge
# Download models and update the configuration file
RUN /bin/bash -c "mineru-models-download -s huggingface -m all"
# Set the entry point to select the local model source and run the given command
ENTRYPOINT ["/bin/bash", "-c", "export MINERU_MODEL_SOURCE=local && exec \"$@\"", "--"]


@@ -0,0 +1,45 @@
# MinerU v2
[English](./README.md) | [中文](./README.zh.md)
This service runs MinerU v2. See the [Reference Documentation](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/).
## Start Services
- **VLM backend server**:
```bash
docker compose --profile vllm-server up -d
```
- **Document parsing API**:
```bash
docker compose --profile api up -d
```
- **Gradio WebUI**:
```bash
docker compose --profile gradio up -d
```
## Test vLLM backend
```bash
pip install mineru
mineru -p demo.pdf -o ./output -b vlm-http-client -u http://localhost:30000
```
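
The API and Gradio profiles can be checked the same way once their containers report healthy; the URLs below are the ones the compose healthchecks probe (assuming the default port overrides):

```bash
# Document parsing API (profile: api)
curl -f http://localhost:8000/health

# Gradio WebUI (profile: gradio)
curl -f http://localhost:7860/
```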
## Services
- `mineru-vllm-server`: The VLM backend server.
- `mineru-api`: The document parsing API.
- `mineru-gradio`: The Gradio WebUI.
## Configuration
- `MINERU_DOCKER_IMAGE`: The Docker image for MinerU, default is `alexsuntop/mineru:2.5.4`.
- `MINERU_PORT_OVERRIDE_VLLM`: The host port for the VLLM server, default is `30000`.
- `MINERU_PORT_OVERRIDE_API`: The host port for the API service, default is `8000`.
- `MINERU_PORT_OVERRIDE_GRADIO`: The host port for the Gradio WebUI, default is `7860`.
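
For instance, a `.env` that keeps the pinned image but remaps host ports (port values are illustrative; compare `.env.example`):

```bash
# .env
MINERU_DOCKER_IMAGE=alexsuntop/mineru:2.5.4
MINERU_PORT_OVERRIDE_VLLM=30000
MINERU_PORT_OVERRIDE_API=8001
MINERU_PORT_OVERRIDE_GRADIO=7861
```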


@@ -0,0 +1,45 @@
# MinerU v2
[English](./README.md) | [中文](./README.zh.md)
This service runs MinerU v2. See the [Reference Documentation](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/).
## Start Services
- **VLM backend server**:
```bash
docker compose --profile vllm-server up -d
```
- **Document parsing API**:
```bash
docker compose --profile api up -d
```
- **Gradio WebUI**:
```bash
docker compose --profile gradio up -d
```
## Test vLLM backend
```bash
pip install mineru
mineru -p demo.pdf -o ./output -b vlm-http-client -u http://localhost:30000
```
## Services
- `mineru-vllm-server`: The VLM backend server.
- `mineru-api`: The document parsing API.
- `mineru-gradio`: The Gradio WebUI.
## Configuration
- `MINERU_DOCKER_IMAGE`: The Docker image for MinerU, default is `alexsuntop/mineru:2.5.4`.
- `MINERU_PORT_OVERRIDE_VLLM`: The host port for the vLLM server, default is `30000`.
- `MINERU_PORT_OVERRIDE_API`: The host port for the API service, default is `8000`.
- `MINERU_PORT_OVERRIDE_GRADIO`: The host port for the Gradio WebUI, default is `7860`.


@@ -0,0 +1,109 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

x-mineru-vllm: &mineru-vllm
  <<: *defaults
  image: ${MINERU_DOCKER_IMAGE:-alexsuntop/mineru:2.6.2}
  build:
    context: .
    dockerfile: Dockerfile
  environment:
    TZ: ${TZ:-UTC}
    MINERU_MODEL_SOURCE: local
  ulimits:
    memlock: -1
    stack: 67108864
  ipc: host
  deploy:
    resources:
      limits:
        cpus: '16.0'
        memory: 32G
      reservations:
        cpus: '8.0'
        memory: 16G
        devices:
          - driver: nvidia
            device_ids: ['0']
            capabilities: [gpu]

services:
  mineru-vllm-server:
    <<: *mineru-vllm
    profiles: ["vllm-server"]
    ports:
      - ${MINERU_PORT_OVERRIDE_VLLM:-30000}:30000
    entrypoint: mineru-vllm-server
    command:
      - --host=0.0.0.0
      - --port=30000
      # If using multiple GPUs, increase throughput with vllm's multi-GPU parallel mode.
      # - --data-parallel-size=2
      # If running on a single GPU and hitting VRAM shortages, reduce the KV cache size with this
      # parameter; if VRAM issues persist, lower it further to 0.4 or below.
      # - --gpu-memory-utilization=0.5
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

  mineru-api:
    <<: *mineru-vllm
    profiles: ["api"]
    ports:
      - ${MINERU_PORT_OVERRIDE_API:-8000}:8000
    entrypoint: mineru-api
    command:
      - --host=0.0.0.0
      - --port=8000
      # If using multiple GPUs, increase throughput with vllm's multi-GPU parallel mode.
      # - --data-parallel-size=2
      # If running on a single GPU and hitting VRAM shortages, reduce the KV cache size with this
      # parameter; if VRAM issues persist, lower it further to 0.4 or below.
      # - --gpu-memory-utilization=0.5
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

  mineru-gradio:
    <<: *mineru-vllm
    profiles: ["gradio"]
    ports:
      - ${MINERU_PORT_OVERRIDE_GRADIO:-7860}:7860
    entrypoint: mineru-gradio
    command:
      - --server-name=0.0.0.0
      - --server-port=7860
      # Enable the vllm engine for Gradio
      - --enable-vllm-engine=true
      # To disable the API, set this to false
      # - --enable-api=false
      # To limit the number of pages per conversion, set a specific number
      # - --max-convert-pages=20
      # If using multiple GPUs, increase throughput with vllm's multi-GPU parallel mode.
      # - --data-parallel-size=2
      # If running on a single GPU and hitting VRAM shortages, reduce the KV cache size with this
      # parameter; if VRAM issues persist, lower it further to 0.4 or below.
      # - --gpu-memory-utilization=0.5
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:7860/ || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s