phxdev Claude committed
Commit adcf4a5 · Parent: 7c75913

Update project configuration and add core application files

- Update README.md with new title and configuration
- Normalize .gitattributes line endings
- Remove old app.py and add new main_gr.py application
- Add requirements.txt, setup.py, start.sh, and Dockerfile for deployment

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (8)
  1. .gitattributes +35 -35
  2. Dockerfile +26 -0
  3. README.md +12 -12
  4. app.py +0 -7
  5. main_gr.py +138 -0
  6. requirements.txt +7 -0
  7. setup.py +36 -0
  8. start.sh +3 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,26 @@
+FROM nvidia/cuda:12.3.2-devel-ubuntu22.04 AS base
+
+# Set environment variables
+ENV DEBIAN_FRONTEND=noninteractive
+WORKDIR /app
+
+# Install dependencies
+RUN apt-get update && apt-get install -y \
+    git \
+    curl \
+    wget \
+    python3 \
+    python3-pip \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY . /app/
+
+RUN if ! update-alternatives --query python > /dev/null 2>&1; then \
+        update-alternatives --install /usr/bin/python python /usr/bin/python3 1; \
+    fi
+
+RUN pip install -r /app/requirements.txt
+USER root
+RUN chmod +x /app/start.sh
+EXPOSE 7860
+CMD ["python", "/app/setup.py"]
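
To exercise this image locally, a minimal sketch (the pixe-3.5 tag is an assumption; --gpus all requires the NVIDIA Container Toolkit, and 7860 matches the EXPOSEd port):

    # Build the image from the repo root, then run it with GPU access.
    docker build -t pixe-3.5 .
    docker run --rm --gpus all -p 7860:7860 pixe-3.5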
README.md CHANGED
@@ -1,12 +1,12 @@
----
-title: Pixe 3.5
-emoji:
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 5.35.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: PIXE 3.5
+emoji: 📉
+colorFrom: green
+colorTo: blue
+sdk: gradio
+sdk_version: 5.7.1
+app_file: setup.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py DELETED
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
main_gr.py ADDED
@@ -0,0 +1,138 @@
+import gradio as gr
+import numpy as np
+import random
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+from pruna import SmashConfig, smash
+
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
+
+
+# Initialize the SmashConfig
+smash_config = SmashConfig()
+smash_config['compilers'] = ['flux_caching']
+smash_config['comp_flux_caching_cache_interval'] = 4  # Higher is faster, but reduces quality
+smash_config['comp_flux_caching_start_step'] = 4  # Best to keep it the same as cache_interval
+smash_config['comp_flux_caching_compile'] = True  # Whether to additionally compile the model for an extra speed-up
+smash_config['comp_flux_caching_save_model'] = False  # Whether to save the model after compilation or just use it for inference
+
+pipe = smash(
+    model=pipe,
+    token='None',  # replace <your-token> with your actual token or set to None if you do not have one yet
+    smash_config=smash_config,
+)
+
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 4096
+
+@spaces.GPU()
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator().manual_seed(seed)
+    image = pipe(
+        prompt=prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        guidance_scale=0.0
+    ).images[0]
+    return image, seed
+
+examples = [
+    "a tiny astronaut hatching from an egg on the moon",
+    "a cat holding a sign that says hello world",
+    "an anime illustration of a wiener schnitzel",
+]
+
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 520px;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown(f"""# FLUX.1 [schnell]
+12B param rectified flow transformer distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) for 4 step generation
+[[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-schnell)]
+""")
+
+        with gr.Row():
+
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+
+            run_button = gr.Button("Run", scale=0)
+
+        result = gr.Image(label="Result", show_label=False)
+
+        with gr.Accordion("Advanced Settings", open=False):
+
+            seed = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=0,
+            )
+
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+            with gr.Row():
+
+                width = gr.Slider(
+                    label="Width",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=1024,
+                )
+
+                height = gr.Slider(
+                    label="Height",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=1024,
+                )
+
+            with gr.Row():
+
+
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=1,
+                    maximum=50,
+                    step=1,
+                    value=4,
+                )
+
+        gr.Examples(
+            examples=examples,
+            fn=infer,
+            inputs=[prompt],
+            outputs=[result, seed],
+            cache_examples="lazy"
+        )
+
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=infer,
+        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
+        outputs=[result, seed]
+    )
+
+demo.launch(share=True)
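
To launch the app outside the container, a minimal sketch (assumes a CUDA-capable host; setup.py, added below, automates these same two steps):

    # Install the pinned dependencies, then start the Gradio app directly.
    pip install -r requirements.txt
    python main_gr.py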
requirements.txt ADDED
@@ -0,0 +1,7 @@
+spaces
+gradio==5.7.1
+pruna
+torch
+numpy
+diffusers
+transformers
setup.py ADDED
@@ -0,0 +1,36 @@
+import subprocess
+import sys
+import os
+
+def install_dependencies():
+    """
+    Install required dependencies from requirements.txt
+    """
+    print("Installing dependencies...")
+    try:
+        subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)
+        print("Dependencies installed successfully!")
+    except subprocess.CalledProcessError as e:
+        print(f"Error installing dependencies: {e}")
+        sys.exit(1)
+
+def run_main_script():
+    """
+    Run the main gradio script
+    """
+    print("Starting PIXE 3.5 Image Generator...")
+    try:
+        subprocess.run([sys.executable, "main_gr.py"], check=True)
+    except subprocess.CalledProcessError as e:
+        print(f"Error running main script: {e}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    print("PIXE 3.5 Setup")
+    print("===============")
+
+    # Install dependencies first
+    install_dependencies()
+
+    # Run the main application
+    run_main_script()
start.sh ADDED
@@ -0,0 +1,3 @@
+#!
+
+#gradio /app/flux-fp8-api/main_gr.py --port 7860 --allow-all-origins
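
As committed, start.sh has a bare "#!" shebang and only a commented-out command, and nothing invokes it beyond the chmod in the Dockerfile. A hypothetical working version, mirroring the Dockerfile's CMD (the shebang and path are assumptions):

    #!/bin/sh
    # Assumed launcher; the Dockerfile's CMD currently performs this step itself.
    python /app/setup.py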