John6666 committed (verified)
Commit d265ac6 · Parent: a388282

Upload handler.py

Files changed (1):
  1. handler.py +8 -6
handler.py CHANGED

@@ -5,8 +5,11 @@ from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, Torch
 from PIL import Image
 import torch
 
-#import torch._dynamo
-#torch._dynamo.config.suppress_errors = True
+IS_COMPILE = True
+
+if IS_COMPILE:
+    import torch._dynamo
+    torch._dynamo.config.suppress_errors = True
 
 #from huggingface_inference_toolkit.logging import logger
 
@@ -18,7 +21,6 @@ def compile_pipeline(pipe) -> Any:
 
 class EndpointHandler:
     def __init__(self, path=""):
-        is_compile = False
         repo_id = "camenduru/FLUX.1-dev-diffusers"
         #repo_id = "NoMoreCopyright/FLUX.1-dev-test"
         dtype = torch.bfloat16
@@ -26,7 +28,7 @@ class EndpointHandler:
         vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=dtype)
         #transformer = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", torch_dtype=dtype, quantization_config=quantization_config).to("cuda")
         self.pipeline = FluxPipeline.from_pretrained(repo_id, vae=vae, torch_dtype=dtype, quantization_config=quantization_config)
-        if is_compile: self.pipeline = compile_pipeline(self.pipeline)
+        if IS_COMPILE: self.pipeline = compile_pipeline(self.pipeline)
        self.pipeline.to("cuda")
 
     #@torch.inference_mode()
@@ -45,9 +47,9 @@ class EndpointHandler:
 
         parameters = data.pop("parameters", {})
 
-        num_inference_steps = parameters.get("num_inference_steps", 30)
+        num_inference_steps = parameters.get("num_inference_steps", 28)
         width = parameters.get("width", 1024)
-        height = parameters.get("height", 768)
+        height = parameters.get("height", 1024)
         guidance_scale = parameters.get("guidance_scale", 3.5)
 
         # seed generator (seed cannot be provided as is but via a generator)
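
Note: the compile_pipeline helper referenced in the @@ -18,7 +21,6 @@ hunk header is defined earlier in handler.py and is not shown in this diff. As a rough sketch of what such a helper usually does for a Flux pipeline (the actual body in this repository may differ), it wraps the heavy submodules with torch.compile; the torch._dynamo.config.suppress_errors = True set above when IS_COMPILE is enabled lets failed compilations fall back to eager execution instead of raising:

import torch

def compile_pipeline(pipe):
    # Hypothetical sketch, not the code from this repository: compile the
    # transformer and the VAE decoder, the most expensive parts of a Flux pipeline.
    pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=False)
    pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=False)
    return pipe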
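
The remaining change moves the request defaults from 30 steps at 1024×768 to 28 steps at 1024×1024, which matches the settings FLUX.1-dev is typically run with. Assuming the handler follows the usual Inference Endpoints contract of a __call__(self, data) that reads "inputs" plus an optional "parameters" dict (the rest of __call__ is outside this diff), an explicit request equivalent to the new defaults looks like:

# Hypothetical invocation; key names mirror the parameters.get(...) calls above.
handler = EndpointHandler()
result = handler({
    "inputs": "a watercolor fox in a snowy forest",
    "parameters": {
        "num_inference_steps": 28,  # new default (was 30)
        "width": 1024,
        "height": 1024,             # new default (was 768)
        "guidance_scale": 3.5,
    },
})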