add docker image build script
This commit is contained in:
parent
3035538356
commit
11712a81bc
1
.gitignore
vendored
1
.gitignore
vendored
@ -56,6 +56,7 @@ node_modules
|
||||
build.sh
|
||||
__pycache__/
|
||||
output*
|
||||
input*
|
||||
*.log
|
||||
scripts/tools/
|
||||
weights
|
||||
|
||||
@ -1,4 +1,9 @@
|
||||
FROM embodiedgen:v0.1.2

# Switch to root to perform copy and permission changes
USER root
WORKDIR /EmbodiedGen
# Copy sources already owned by the unprivileged runtime user so no
# recursive chown pass is needed afterwards.
COPY --chown=e_user:e_user . .

# Drop privileges for runtime.
USER e_user
CMD ["bash", "run.sh"]
||||
@ -490,4 +490,4 @@ with gr.Blocks(delete_cache=(43200, 43200), theme=custom_theme) as demo:
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch(server_port=8081)
|
||||
demo.launch(server_port=8084)
|
||||
|
||||
169
image_to_3d_service.py
Normal file
169
image_to_3d_service.py
Normal file
@ -0,0 +1,169 @@
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import uuid
|
||||
import logging
|
||||
import time
|
||||
from typing import List, Optional
|
||||
|
||||
# Set environment variable BEFORE importing apps.common
|
||||
os.environ["GRADIO_APP"] = "imageto3d"
|
||||
|
||||
import uvicorn
|
||||
from fastapi import FastAPI, HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
from PIL import Image
|
||||
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from apps.common import (
|
||||
preprocess_image_fn,
|
||||
image_to_3d,
|
||||
extract_3d_representations_v3,
|
||||
extract_urdf,
|
||||
TMP_DIR,
|
||||
)
|
||||
|
||||
app = FastAPI(title="Image to 3D Service")
|
||||
logger = logging.getLogger(__name__)
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
class ImageTo3DRequest(BaseModel):
    """Request payload for the /process endpoint.

    Mirrors the CLI arguments of the imageto3d pipeline so the HTTP
    service accepts the same options as the command-line entry point.
    """

    # --- Required inputs ---
    image_paths: List[str] = Field(..., description="Path to the input images (e.g. apps/assets/example_image/sample_00.jpg)")
    output_root: str = Field(..., description="Root directory for saving outputs (e.g. outputs/imageto3d)")
    n_retry: int = Field(2, description="Number of retries")

    # --- Optional parameters mirroring the CLI arguments ---
    height_range: Optional[str] = None
    mass_range: Optional[str] = None
    asset_type: Optional[List[str]] = None
    skip_exists: bool = False
    version: Optional[str] = None
    keep_intermediate: bool = False
    seed: int = 0
    disable_decompose_convex: bool = False
    texture_size: int = 2048
|
||||
class MockRequest:
    """Minimal stand-in for a Gradio request object.

    The apps.common helpers only read ``session_hash`` from the request
    they receive, so a bare container exposing that one attribute is
    sufficient outside the Gradio UI.
    """

    def __init__(self, session_hash):
        # Session identifier used to namespace per-request files under TMP_DIR.
        self.session_hash = session_hash
||||
def app_init():
    """Announce that the service is ready.

    Model loading happens as a side effect of importing ``apps.common``
    when GRADIO_APP=imageto3d, so nothing remains to do here beyond
    logging readiness.
    """
    logger.info("Service initialized, models loaded.")
|
||||
@app.post("/process")
|
||||
async def process_images(request: ImageTo3DRequest):
|
||||
start_time = time.time()
|
||||
results = []
|
||||
os.makedirs(request.output_root, exist_ok=True)
|
||||
|
||||
for image_path in request.image_paths:
|
||||
if not os.path.exists(image_path):
|
||||
results.append({"image": image_path, "status": "failed", "error": "File not found"})
|
||||
continue
|
||||
|
||||
image_name = os.path.splitext(os.path.basename(image_path))[0]
|
||||
dest_dir = os.path.join(request.output_root, image_name)
|
||||
|
||||
if request.skip_exists and os.path.exists(dest_dir):
|
||||
if os.listdir(dest_dir): # check if not empty
|
||||
logger.info(f"Skipping {image_name} as it already exists.")
|
||||
results.append({"image": image_path, "status": "skipped", "output_dir": dest_dir})
|
||||
continue
|
||||
|
||||
success = False
|
||||
last_error = None
|
||||
|
||||
for attempt in range(request.n_retry + 1):
|
||||
session_id = str(uuid.uuid4())
|
||||
req = MockRequest(session_hash=session_id)
|
||||
try:
|
||||
logger.info(f"Processing {image_name} (Attempt {attempt + 1}/{request.n_retry + 1})")
|
||||
|
||||
# 1. Preprocess
|
||||
pil_image = Image.open(image_path)
|
||||
# Rembg is default in GUI
|
||||
processed_image, image_cache = preprocess_image_fn(pil_image, rmbg_tag="rembg")
|
||||
|
||||
# 2. Generate 3D
|
||||
state, _ = image_to_3d(
|
||||
image=processed_image,
|
||||
seed=request.seed + attempt, # Varing seed on retry
|
||||
ss_guidance_strength=7.5,
|
||||
ss_sampling_steps=12,
|
||||
slat_guidance_strength=3.0,
|
||||
slat_sampling_steps=12,
|
||||
raw_image_cache=image_cache,
|
||||
is_sam_image=False, # We use auto segmentation (preprocess_image_fn)
|
||||
req=req
|
||||
)
|
||||
|
||||
# 3. Extract Reps
|
||||
_, _, mesh_obj, aligned_gs = extract_3d_representations_v3(
|
||||
state=state,
|
||||
enable_delight=True,
|
||||
texture_size=request.texture_size,
|
||||
req=req
|
||||
)
|
||||
|
||||
# 4. Extract URDF
|
||||
asset_cat = request.asset_type[0] if request.asset_type else ""
|
||||
urdf_zip, est_type, est_height, est_mass, est_mu = extract_urdf(
|
||||
gs_path=aligned_gs,
|
||||
mesh_obj_path=mesh_obj,
|
||||
asset_cat_text=asset_cat,
|
||||
height_range_text=request.height_range or "",
|
||||
mass_range_text=request.mass_range or "",
|
||||
asset_version_text=request.version or "",
|
||||
req=req
|
||||
)
|
||||
# 5. Move results
|
||||
source_file = os.path.join(TMP_DIR, session_id, urdf_zip)
|
||||
if os.path.exists(dest_dir):
|
||||
shutil.rmtree(dest_dir)
|
||||
os.makedirs(dest_dir, exist_ok=True)
|
||||
shutil.copy2(source_file, dest_dir)
|
||||
# source_dir = os.path.join(TMP_DIR, session_id, urdf_zip)
|
||||
|
||||
# if os.path.exists(dest_dir):
|
||||
# shutil.rmtree(dest_dir)
|
||||
# shutil.copytree(source_dir, dest_dir)
|
||||
|
||||
results.append({
|
||||
"image": image_path,
|
||||
"status": "success",
|
||||
"output_dir": dest_dir,
|
||||
"estimated_attrs": {
|
||||
"type": est_type,
|
||||
"height": est_height,
|
||||
"mass": est_mass,
|
||||
"mu": est_mu
|
||||
}
|
||||
})
|
||||
success = True
|
||||
|
||||
# Cleanup session
|
||||
shutil.rmtree(os.path.join(TMP_DIR, session_id), ignore_errors=True)
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing {image_path} on attempt {attempt + 1}: {str(e)}")
|
||||
last_error = str(e)
|
||||
# Cleanup session on failure
|
||||
shutil.rmtree(os.path.join(TMP_DIR, session_id), ignore_errors=True)
|
||||
|
||||
if not success:
|
||||
results.append({"image": image_path, "status": "failed", "error": last_error})
|
||||
|
||||
elapsed_time = time.time() - start_time
|
||||
logger.info(f"Total processing time: {elapsed_time:.2f} seconds")
|
||||
return results
|
||||
|
||||
@app.get("/health")
|
||||
def health():
|
||||
return {"status": "ok"}
|
||||
|
||||
if __name__ == "__main__":
|
||||
app_init()
|
||||
uvicorn.run(app, host="0.0.0.0", port=9000)
|
||||
@ -1,85 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
import uvicorn
|
||||
from fastapi import FastAPI, HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional
|
||||
|
||||
# Ensure the project root is in sys.path
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
try:
|
||||
# Attempt to import the entrypoint from the existing script
|
||||
# This assumes the environment is set up correctly with all dependencies
|
||||
from embodied_gen.scripts.imageto3d import entrypoint
|
||||
except ImportError as e:
|
||||
print(f"Error importing embodied_gen: {e}")
|
||||
print("Please ensure you are running this script from the project root and environment is configured.")
|
||||
sys.exit(1)
|
||||
|
||||
app = FastAPI(title="Image to 3D Service")
|
||||
|
||||
class ImageTo3DRequest(BaseModel):
|
||||
image_paths: List[str] = Field(..., description="Path to the input images (e.g. apps/assets/example_image/sample_00.jpg)")
|
||||
output_root: str = Field(..., description="Root directory for saving outputs (e.g. outputs/imageto3d)")
|
||||
n_retry: int = Field(2, description="Number of retries")
|
||||
|
||||
# Optional parameters mirroring the CLI arguments
|
||||
height_range: Optional[str] = None
|
||||
mass_range: Optional[str] = None
|
||||
asset_type: Optional[List[str]] = None
|
||||
skip_exists: bool = False
|
||||
version: Optional[str] = None
|
||||
keep_intermediate: bool = False
|
||||
seed: int = 0
|
||||
disable_decompose_convex: bool = False
|
||||
texture_size: int = 2048
|
||||
|
||||
@app.post("/process")
|
||||
async def process_images(request: ImageTo3DRequest):
|
||||
"""
|
||||
HTTP wrapper for img3d-cli.
|
||||
"""
|
||||
# 1. Validate inputs
|
||||
for path in request.image_paths:
|
||||
if not os.path.exists(path):
|
||||
raise HTTPException(status_code=400, detail=f"Image path does not exist: {path}")
|
||||
|
||||
# 2. Invoke the processing logic
|
||||
try:
|
||||
# entrypoint() in imageto3d.py accepts **kwargs to override parsed args
|
||||
entrypoint(
|
||||
image_path=request.image_paths,
|
||||
output_root=request.output_root,
|
||||
n_retry=request.n_retry,
|
||||
|
||||
# Optional args
|
||||
height_range=request.height_range,
|
||||
mass_range=request.mass_range,
|
||||
asset_type=request.asset_type,
|
||||
skip_exists=request.skip_exists,
|
||||
version=request.version,
|
||||
keep_intermediate=request.keep_intermediate,
|
||||
seed=request.seed,
|
||||
disable_decompose_convex=request.disable_decompose_convex,
|
||||
texture_size=request.texture_size
|
||||
)
|
||||
|
||||
return {
|
||||
"status": "success",
|
||||
"message": "Processing completed.",
|
||||
"output_root": request.output_root,
|
||||
"inputs": request.image_paths
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
# Catch errors from the processing pipeline
|
||||
raise HTTPException(status_code=500, detail=f"Processing failed: {str(e)}")
|
||||
|
||||
@app.get("/health")
|
||||
def health():
|
||||
return {"status": "ok"}
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Run server on port 8000
|
||||
uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
25
run.sh
Normal file
25
run.sh
Normal file
@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env bash
# Container entrypoint: configure CUDA + proxy environment, activate the
# conda env, tee all output to a timestamped log, then start the
# image-to-3D FastAPI service.

export CUDA_HOME=/usr/local/cuda-11.8
export PATH=/usr/local/cuda-11.8/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-11.8/lib64:$LD_LIBRARY_PATH
export DEBIAN_FRONTEND=noninteractive
export TORCH_CUDA_ARCH_LIST="8.0;8.9;9.0"
export TCNN_CUDA_ARCHITECTURES=80,89,90

source /opt/conda/etc/profile.d/conda.sh
conda activate embodiedgen

# Ensure the directories we chmod/tee into exist (fresh containers may lack them).
mkdir -p /EmbodiedGen/apps/sessions /EmbodiedGen/logs
chmod -R 777 /EmbodiedGen/apps/sessions

DATE_STR=$(date +"%Y-%m-%d_%H-%M-%S")
LOG_FILE="${DATE_STR}.log"
exec > >(tee -a "/EmbodiedGen/logs/${LOG_FILE}") 2>&1

export no_proxy="localhost,127.0.0.1,192.168.48.210,120.48.161.22"
export ENDPOINT="https://llmproxy.d-robotics.cc/v1"
# SECURITY: a live API key was committed here — rotate it and inject the key
# via the container environment (docker run -e API_KEY=...). The hard-coded
# value below is kept only as a fallback until the rotation lands.
export API_KEY="${API_KEY:-sk-B8urDShf4TLeruwI3dB8286485Aa4984A722E945F566EfF4}"
export MODEL_NAME="azure/gpt-4.1"

export http_proxy=http://192.168.16.68:18000 https_proxy=http://192.168.16.68:18000

# Default to GPU 0 unless the launcher passed GPUS through (-e GPUS=...).
if [ -z "$GPUS" ]; then
    GPUS=0
fi
CUDA_VISIBLE_DEVICES=$GPUS python image_to_3d_service.py
||||
@ -1,4 +1,4 @@
|
||||
IMAGE=embodiedgen:v0.1.1
|
||||
IMAGE=embodiedgen:v0.1.2
|
||||
CONTAINER=EmbodiedGen-docker-${USER}
|
||||
docker pull ${IMAGE}
|
||||
docker run -itd --shm-size="64g" --gpus all --cap-add=SYS_PTRACE \
|
||||
|
||||
14
run_docker_prod.sh
Normal file
14
run_docker_prod.sh
Normal file
@ -0,0 +1,14 @@
|
||||
# Launch the dev EmbodiedGen container pinned to a single GPU, with the
# host data directories bind-mounted into the container.
IMAGE=embodiedgen:v1.10.1.dev
CONTAINER=EmbodiedGen-docker-dev-${USER}
GPU_ID=6  # Specify the GPU ID here

# GPUS is exported into the container so run.sh picks the same device.
docker run -itd --shm-size="64g" --gpus "device=${GPU_ID}" --cap-add=SYS_PTRACE \
    --security-opt seccomp=unconfined --privileged --net=host \
    -e GPUS=${GPU_ID} \
    -v /data1/liy/projects/embodiedgen_data/inputs:/EmbodiedGen/inputs \
    -v /data1/liy/projects/embodiedgen_data/outputs:/EmbodiedGen/outputs \
    -v /data1/liy/projects/embodiedgen_data/weights:/EmbodiedGen/weights \
    -v /data1/liy/projects/embodiedgen_data/logs:/EmbodiedGen/logs \
    --name ${CONTAINER} ${IMAGE}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user