Spaces: Running on Zero

Alexander Bagus committed
Commit · 03e9920
Parent(s): 5cdec58

Browse files:
- app.py +7 -8
- examples/0data.json +5 -10
app.py CHANGED

```diff
@@ -93,9 +93,10 @@ pipe = ZImageControlPipeline(
 pipe.to("cuda", torch.bfloat16)
 print("pipe ready.")
 
-def prepare(edit_dict):
+def prepare(edit_dict, prompt):
     # return edit_dict['background']
-
+    output_image = image_utils.replace_transparent(edit_dict['layers'][0], (0, 0, 0))
+    return output_image, prompt
 
 @spaces.GPU
 def inference(
@@ -271,8 +272,8 @@ with gr.Blocks() as demo:
     output_image = gr.Image(label="Generated image", show_label=False)
     # polished_prompt = gr.Textbox(label="Polished prompt", interactive=False)
 
-    with gr.Accordion("Preprocessor data", open=
-        mask_image = gr.Image(label="Mask
+    with gr.Accordion("Preprocessor data", open=False):
+        mask_image = gr.Image(label="Generated Mask", interactive=False)
 
     gr.Examples(examples=examples, inputs=[edit_dict, prompt])
     gr.Markdown(read_file("static/footer.md"))
@@ -280,8 +281,8 @@ with gr.Blocks() as demo:
     # edit_dict.upload(fn=lambda x: x, inputs=[mask_image], outputs=[input_image])
     run_button.click(
         fn=prepare,
-        inputs=[edit_dict],
-        outputs=[mask_image]
+        inputs=[edit_dict, prompt],
+        outputs=[mask_image, prompt]
     ).then(
         fn=inference,
         inputs=[
@@ -289,8 +290,6 @@ with gr.Blocks() as demo:
             negative_prompt,
             edit_dict,
             mask_image,
-            # image_scale,
-            # control_mode,
             control_context_scale,
             seed,
             randomize_seed,
```
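In the new `prepare`, the first editor layer from the `gr.ImageEditor` dict is flattened onto black and returned as the mask, with the prompt passed through untouched; that is why `prompt` now appears in both the `inputs` and `outputs` of `run_button.click`, so the chained `.then()` call can hand both the fresh `mask_image` and the prompt on to `inference`. The helper `image_utils.replace_transparent` lives elsewhere in the repo and only its call site appears in this commit; a minimal sketch of what it plausibly does, assuming a PIL RGBA input:

```python
# Hypothetical sketch of image_utils.replace_transparent -- the helper's body
# is not part of this commit, so everything below is an assumption.
from PIL import Image

def replace_transparent(image: Image.Image, color: tuple) -> Image.Image:
    rgba = image.convert("RGBA")
    # Composite the layer over a solid-color background so transparent pixels
    # blend toward `color` ((0, 0, 0), i.e. black, at the call site above).
    background = Image.new("RGB", rgba.size, color)
    background.paste(rgba, mask=rgba.split()[-1])  # alpha band as paste mask
    return background
```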
examples/0data.json CHANGED

```diff
@@ -1,27 +1,22 @@
 [
     [
         "examples/pose2.jpg",
-        "
-        "HED"
+        "Red bikini"
     ],
     [
         "examples/bottle.jpg",
-        "
-        "HED"
+        "a cup of coffee"
     ],
     [
         "examples/room.jpg",
-        "
-        "Depth"
+        "Starry night painting by Van Gogh"
     ],
     [
         "examples/pose1.jpg",
-        "
-        "Pose"
+        "Silver armor"
     ],
     [
         "examples/bird.jpg",
-        "
-        "Canny"
+        "Green feather."
     ]
 ]
```
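Each example row is now a two-element pair that lines up positionally with `gr.Examples(examples=examples, inputs=[edit_dict, prompt])` in app.py: the preprocessor-mode strings ("HED", "Depth", "Pose", "Canny") are dropped, consistent with `# control_mode` disappearing from the `inference` inputs. Assuming the file is read with plain `json.load` (the loading code is outside this diff):

```python
# Hedged sketch: the loader itself is not part of this commit. Each row maps
# onto the gr.Examples inputs [edit_dict, prompt].
import json

with open("examples/0data.json", encoding="utf-8") as f:
    examples = json.load(f)

# e.g. examples[0] == ["examples/pose2.jpg", "Red bikini"]
```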