Update app.py
app.py CHANGED
@@ -108,7 +108,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
     transformer=QwenImageTransformer2DModel.from_pretrained(
-        "linoyts/Qwen-Image-Edit-Rapid-AIO",
+        "linoyts/Qwen-Image-Edit-Rapid-AIO",
         subfolder='transformer',
         torch_dtype=dtype,
         device_map='cuda'
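For reference, the pipeline assembly this hunk touches can be reproduced standalone roughly as follows. This is a minimal sketch: the `torch.bfloat16` dtype and the top-level diffusers imports are assumptions, since the diff only shows the two `from_pretrained` calls.

```python
# Minimal sketch; dtype and imports are assumptions not shown in this diff.
import torch
from diffusers import QwenImageEditPlusPipeline, QwenImageTransformer2DModel

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16  # assumed; app.py defines dtype elsewhere

# Load the replacement transformer separately, then hand it to the edit pipeline.
transformer = QwenImageTransformer2DModel.from_pretrained(
    "linoyts/Qwen-Image-Edit-Rapid-AIO",
    subfolder="transformer",
    torch_dtype=dtype,
    device_map="cuda",
)
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=transformer,
    torch_dtype=dtype,
).to(device)
```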
@@ -140,13 +140,6 @@ pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
 pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
                        weight_name="loras/Qwen4Play_v2.safetensors",
                        adapter_name="upscale-image")
-# pipe.load_lora_weights("lovis93/next-scene-qwen-image-lora-2509",
-#                        weight_name="next-scene_lora-v2-3000.safetensors",
-#                        adapter_name="next-scene")
-# pipe.load_lora_weights("wiikoo/Qwen-lora-nsfw",
-#                        weight_name="loras/Qwen_Nsfw_Body_V14-10K.safetensors",
-#                        adapter_name="upscale-image")
-
 
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 MAX_SEED = np.iinfo(np.int32).max
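The commit keeps only the Qwen4Play LoRA and deletes the commented-out alternatives. As a hedged sketch of how a LoRA loaded this way is typically activated with diffusers (the `set_adapters` call is an assumption about the rest of app.py; this hunk only shows `load_lora_weights`):

```python
# Sketch only: assumes the pipeline exposes diffusers' LoRA adapter API.
pipe.load_lora_weights(
    "wiikoo/Qwen-lora-nsfw",
    weight_name="loras/Qwen4Play_v2.safetensors",
    adapter_name="upscale-image",
)
# Activate the adapter by name (assumed usage, not shown in this hunk).
pipe.set_adapters(["upscale-image"], adapter_weights=[1.0])
```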
@@ -226,22 +219,22 @@ def infer(
     ).images
 
     try:
-        #
+        # Prepare ffmpeg command
         cmd = [
             'ffmpeg',
-            '-i', 'pipe:0',
-            '-vf', 'gblur=sigma=25',
-            '-f', 'image2pipe',
-            '-c:v', 'png',
-            'pipe:1'
+            '-i', 'pipe:0',
+            '-vf', 'gblur=sigma=25',
+            '-f', 'image2pipe',
+            '-c:v', 'png',
+            'pipe:1'
         ]
 
-        #
+        # Convert PIL Image to bytes
         img_byte_arr = BytesIO()
-        images[0].save(img_byte_arr, format='PNG')
+        images[0].save(img_byte_arr, format='PNG')
         img_data = img_byte_arr.getvalue()
 
-        #
+        # Run ffmpeg with pipe
         result = subprocess.run(
             cmd,
             input=img_data,
@@ -249,14 +242,11 @@ def infer(
             check=True
         )
 
-        #
+        # Read result from stdout
         blurred_image_data = BytesIO(result.stdout)
         blurred_image = Image.open(blurred_image_data)
 
-        #
-        # From here you can save it or use it as needed
-
-        # If a temporary file is still needed for the result:
+        # Save blurred image to temp file
         with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as blur_tmpfile:
             blurred_image.save(blur_tmpfile.name, 'JPEG')
             blurred_img_path = blur_tmpfile.name
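The blur step converts the first generated image to PNG bytes, pipes them through ffmpeg's `gblur` filter, and reads the blurred PNG back from stdout before writing a temporary JPEG. A self-contained sketch of that flow, assuming ffmpeg is on PATH (the function name and the `capture_output` flag are illustrative, not copied from app.py):

```python
import subprocess
from io import BytesIO
from PIL import Image

def blur_with_ffmpeg(image: Image.Image, sigma: float = 25.0) -> Image.Image:
    # Encode the PIL image as PNG bytes in memory.
    buf = BytesIO()
    image.save(buf, format="PNG")

    # Read from stdin, apply a Gaussian blur, and emit a PNG on stdout.
    cmd = [
        "ffmpeg",
        "-i", "pipe:0",
        "-vf", f"gblur=sigma={sigma}",
        "-f", "image2pipe",
        "-c:v", "png",
        "pipe:1",
    ]
    result = subprocess.run(cmd, input=buf.getvalue(), capture_output=True, check=True)
    return Image.open(BytesIO(result.stdout))
```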
@@ -264,16 +254,7 @@ def infer(
         return images[0], blurred_img_path, seed
 
     except Exception as e:
-        raise Exception(f"
-
-@spaces.GPU(duration=60)
-def infer_example(input_image, prompt, lora_adapter):
-    input_pil = input_image.convert("RGB")
-    guidance_scale = 1.0
-    steps = 5
-    result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
-    return result, seed
-
+        raise Exception(f"Blur error: {e}")
 
 css="""
 #col-container {
@@ -315,31 +296,6 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                 guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                 steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
-
-        gr.Examples(
-            examples=[
-                ["examples/1.jpg", "Transform into anime.", "Photo-to-Anime"],
-                ["examples/5.jpg", "Remove shadows and relight the image using soft lighting.", "Light-Restoration"],
-                ["examples/4.jpg", "Use a subtle golden-hour filter with smooth light diffusion.", "Relight"],
-                ["examples/2.jpeg", "Rotate the camera 45 degrees to the left.", "Multiple-Angles"],
-                ["examples/7.jpg", "Light source from the Right Rear", "Multi-Angle-Lighting"],
-                ["examples/10.jpeg", "Upscale the image.", "Upscale-Image"],
-                ["examples/7.jpg", "Light source from the Below", "Multi-Angle-Lighting"],
-                ["examples/2.jpeg", "Switch the camera to a top-down right corner view.", "Multiple-Angles"],
-                ["examples/9.jpg", "The camera moves slightly forward as sunlight breaks through the clouds, casting a soft glow around the character's silhouette in the mist. Realistic cinematic style, atmospheric depth.", "Next-Scene"],
-                ["examples/8.jpg", "Make the subjects skin details more prominent and natural.", "Edit-Skin"],
-                ["examples/6.jpg", "Switch the camera to a bottom-up view.", "Multiple-Angles"],
-                ["examples/6.jpg", "Rotate the camera 180 degrees upside down.", "Multiple-Angles"],
-                ["examples/4.jpg", "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
-                ["examples/4.jpg", "Switch the camera to a top-down view.", "Multiple-Angles"],
-                ["examples/4.jpg", "Switch the camera to a wide-angle lens.", "Multiple-Angles"],
-            ],
-            inputs=[input_image, prompt, lora_adapter],
-            outputs=[output_image1, output_image2, seed],
-            fn=infer_example,
-            cache_examples=False,
-            label="Examples"
-        )
 
     run_button.click(
         fn=infer,