Alexander Bagus committed
Commit 5b96bb2 · 1 Parent(s): 03e9920
Files changed (5)
  1. README.md +6 -2
  2. app.py +2 -33
  3. examples/0data.json +0 -8
  4. static/footer.md +16 -8
  5. utils/repo_utils.py +1 -0
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: ZIT Inpaint
+title: Z-Image Turbo (ZIT) - Inpaint
 emoji: 💃
 colorFrom: blue
 colorTo: yellow
@@ -8,7 +8,11 @@ sdk_version: 6.1.0
 app_file: app.py
 pinned: false
 license: apache-2.0
-short_description: Edit image using a mask (selective changes to areas)
+short_description: Edit image by changing specific areas (mask)
+models:
+- alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union-2.0
+- alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union
+- Tongyi-MAI/Z-Image-Turbo
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -95,6 +95,7 @@ print("pipe ready.")
 
 def prepare(edit_dict, prompt):
     # return edit_dict['background']
+    if not prompt: prompt = "Ultra HD, 4K"
     output_image = image_utils.replace_transparent(edit_dict['layers'][0], (0, 0, 0))
     return output_image, prompt
 
@@ -104,8 +105,6 @@ def inference(
     negative_prompt,
     edit_dict,
     mask_image,
-    # image_scale=1.0,
-    # control_mode='Canny',
     control_context_scale = 0.75,
     seed=42,
     randomize_seed=True,
@@ -122,36 +121,11 @@
         print("Error: edit_dict is empty.")
         return None
 
-    # print(edit_dict)
-    # input_image, width, height = scale_image(input_image, image_scale)
-    # control_mode='HED'
-    # processor_id = 'canny'
-    # if control_mode == 'HED':
-    #     processor_id = 'softedge_hed'
-    # if control_mode =='Depth':
-    #     processor_id = 'depth_midas'
-    # if control_mode =='MLSD':
-    #     processor_id = 'mlsd'
-    # if control_mode =='Pose':
-    #     processor_id = 'openpose_full'
-
-    # print(f"DEBUG: processor_id={processor_id}")
-    # processor = Processor(processor_id)
-
-    # Width must be divisible by 16
-
-    # control_image, width, height = image_utils.rescale_image(input_image, image_scale, 16)
-    # control_image = control_image.resize((1024, 1024))
     width, height = edit_dict['background'].size
 
     print("DEBUG: control_image_torch")
     sample_size = [height, width]
-    # control_image = processor(control_image, to_pil=True)
-    # control_image = control_image.resize((width, height))
-    # control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
 
-    # mask_image = edit_dict['layers'][0]
-    # mask_image = edit_dict['composite']
     if mask_image is not None:
         mask_image = get_image_latent(mask_image, sample_size=sample_size)[:, :1, 0]
     else:
@@ -221,12 +195,7 @@ with gr.Blocks() as demo:
             placeholder="Enter your prompt",
             # container=False,
         )
-        # is_polish_prompt = gr.Checkbox(label="Polish prompt", value=True)
-        # control_mode = gr.Radio(
-        #     choices=["Canny", "Depth", "HED", "MLSD", "Pose"],
-        #     value="Canny",
-        #     label="Control Mode"
-        # )
+
         run_button = gr.Button("Generate", variant="primary")
         with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Textbox(
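For readers unfamiliar with Gradio's ImageEditor value, the sketch below is a simplified illustration (not the Space's actual code) of how an `edit_dict` with `background`/`layers` keys can be turned into a control image and a single-channel mask, roughly mirroring the `prepare()` and mask handling in the diff above. The helper name and tensor shape are assumptions.

```python
# Hypothetical helper, for illustration only: turn a gr.ImageEditor value into
# a control image (brush strokes over black, similar in spirit to
# image_utils.replace_transparent) and a (1, 1, H, W) mask in [0, 1]
# (the real app derives its mask via get_image_latent instead).
import numpy as np
import torch
from PIL import Image


def prepare_mask_and_control(edit_dict):
    background = edit_dict["background"]             # original photo (PIL image)
    layer = edit_dict["layers"][0].convert("RGBA")   # user's brush strokes

    # Composite the strokes over black so transparent pixels become (0, 0, 0).
    black = Image.new("RGBA", layer.size, (0, 0, 0, 255))
    control_image = Image.alpha_composite(black, layer).convert("RGB")

    # Painted pixels -> 1.0, untouched pixels -> 0.0.
    alpha = np.asarray(layer.split()[-1], dtype=np.float32) / 255.0
    mask = torch.from_numpy(alpha)[None, None]       # shape (1, 1, H, W)

    # The diff uses sample_size = [height, width]; layer and background match.
    assert background.size == layer.size
    return control_image, mask
```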
examples/0data.json CHANGED
@@ -7,16 +7,8 @@
         "examples/bottle.jpg",
         "a cup of coffee"
     ],
-    [
-        "examples/room.jpg",
-        "Starry night painting by Van Gogh"
-    ],
     [
         "examples/pose1.jpg",
         "Silver armor"
-    ],
-    [
-        "examples/bird.jpg",
-        "Green feather."
     ]
 ]
static/footer.md CHANGED
@@ -1,16 +1,24 @@
 
+## What does this app do?
+This AI image editor transforms a picture from a mask and a text prompt: mark the area you want to change, describe the desired result in natural language, and the app edits only the selected region.
+
 ## Usage
-- **Polish Prompt**: ZIT needs a detailed prompt, which you can get by enabling polish prompt.
-- **Context Scale**: Similar to strength, the higher the value, the more detail is preserved. The recommended control_context_scale range is 0.65 to 0.80.
-- **Image Scale**: Upscale/downscale image resolution.
+- **Edit Image (required)**: Upload the image you want to edit, then mark the area you want to change.
+- **Prompt (required)**: Type a description of the changes you want to make to the masked area.
+- **Context Scale**: Higher values preserve more detail from the original image.
 
-# Version History
-2025-12-13: Updated to Controlnet Union v2.0
+### Tips
+If the result isn't great, try marking a bigger area or lowering the *context scale*.
+
+
+## ComfyUI
+As of this writing, ComfyUI integration isn't supported yet.
+You can follow updates here: <https://github.com/comfyanonymous/ComfyUI/pull/11304>
+
+## Version History
+2025-12-14: Published as a Hugging Face Space.
 
 ## References
 - **alibaba-pai - Controlnet Union v2.0**: <https://huggingface.co/alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union-2.0>
 - **VideoX-Fun**: <https://github.com/aigc-apps/VideoX-Fun>
 - **Tongyi-MAI**: <https://huggingface.co/Tongyi-MAI/Z-Image-Turbo>
-
-
-<!-- https://github.com/comfyanonymous/ComfyUI/pull/11062 -->
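The usage notes above map directly onto a small Gradio interface. The sketch below is a hypothetical, stripped-down wiring of that flow; component names and the placeholder function are assumptions, and the real app.py passes more inputs (negative prompt, context scale, seed, and so on).

```python
# Minimal, illustrative wiring of the "mask an area, type a prompt, Generate"
# flow described in the usage notes. fake_inpaint is a stand-in for the
# Z-Image Turbo ControlNet pipeline the actual Space runs.
import gradio as gr


def fake_inpaint(edit_dict, prompt):
    # Placeholder: return the untouched background instead of running the model.
    return edit_dict["background"] if edit_dict else None


with gr.Blocks() as demo:
    editor = gr.ImageEditor(type="pil", label="Edit Image (mark the area to change)")
    prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt")
    run_button = gr.Button("Generate", variant="primary")
    result = gr.Image(label="Result")
    run_button.click(fake_inpaint, inputs=[editor, prompt], outputs=result)

if __name__ == "__main__":
    demo.launch()
```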
 
 
 
utils/repo_utils.py CHANGED
@@ -46,6 +46,7 @@ def move_folder(source_path, destination_path):
         if shutil.os.path.exists(destination_path):
             shutil.rmtree(destination_path)  # Remove existing destination folder
         shutil.copytree(source_path, destination_path)
+        print(f"Folder '{source_path}' copied to '{destination_path}'")
     except Exception as e:
         print(f"Error move_folder: {e}")
 
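For context, a self-contained reading of the helper this hunk touches; everything outside the lines shown in the diff is an assumption reconstructed from the fragment.

```python
# Sketch of move_folder as suggested by the diff fragment (not verified against
# the full utils/repo_utils.py). Despite its name, it copies rather than moves.
import shutil


def move_folder(source_path, destination_path):
    try:
        if shutil.os.path.exists(destination_path):
            shutil.rmtree(destination_path)  # remove any existing destination folder
        shutil.copytree(source_path, destination_path)
        print(f"Folder '{source_path}' copied to '{destination_path}'")
    except Exception as e:
        print(f"Error move_folder: {e}")
```

If a true move is intended, `shutil.move(source_path, destination_path)` would relocate the tree instead of duplicating it.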