Commit

apply review comments
eaidova committed Oct 22, 2024
1 parent b3a8726 commit 0667320
Showing 3 changed files with 10 additions and 15 deletions.
14 changes: 3 additions & 11 deletions optimum/exporters/openvino/model_configs.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import enum
-import random
 from copy import deepcopy
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
@@ -1590,11 +1589,7 @@ def __init__(
         **kwargs,
     ):
         self.task = task
-        if random_batch_size_range:
-            low, high = random_batch_size_range
-            self.batch_size = random.randint(low, high)
-        else:
-            self.batch_size = batch_size
+        self.batch_size = batch_size
         self.pooled_projection_dim = normalized_config.config.pooled_projection_dim
 
     def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int64", float_dtype: str = "fp32"):
@@ -1642,11 +1637,8 @@ def rename_ambiguous_inputs(self, inputs):
 
 
 @register_in_tasks_manager("t5-encoder-model", *["feature-extraction"], library_name="diffusers")
-class T5EncoderOpenVINOConfig(CLIPTextOnnxConfig):
-    def patch_model_for_export(
-        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
-    ) -> ModelPatcher:
-        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+class T5EncoderOpenVINOConfig(CLIPTextOpenVINOConfig):
+    pass
 
 
 class DummyFluxTransformerInputGenerator(DummyVisionInputGenerator):
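For context on the changes above: the dummy input generator now uses the `batch_size` it is given directly instead of sampling one from `random_batch_size_range`, and the T5 encoder export config simply reuses `CLIPTextOpenVINOConfig`. Below is a minimal sketch of the simplified generator pattern; the class name and default dimensions are illustrative and not the library code.

```python
import torch

# Illustrative stand-in only; the real generator lives in model_configs.py and
# reads pooled_projection_dim from the model's normalized config.
class PooledProjectionDummyInputSketch:
    def __init__(self, task: str, batch_size: int = 2, pooled_projection_dim: int = 768):
        self.task = task
        self.batch_size = batch_size  # fixed value; no random.randint(low, high) any more
        self.pooled_projection_dim = pooled_projection_dim

    def generate(self, input_name: str, framework: str = "pt") -> torch.Tensor:
        # one dense tensor of shape (batch_size, pooled_projection_dim)
        return torch.randn(self.batch_size, self.pooled_projection_dim)
```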
5 changes: 5 additions & 0 deletions optimum/intel/__init__.py
@@ -271,6 +271,7 @@
     except OptionalDependencyNotAvailable:
         from .utils.dummy_openvino_and_diffusers_objects import (
             OVDiffusionPipeline,
+            OVFluxPipeline,
             OVLatentConsistencyModelPipeline,
             OVPipelineForImage2Image,
             OVPipelineForInpainting,
@@ -287,11 +288,15 @@
     else:
         from .openvino import (
             OVDiffusionPipeline,
+            OVFluxPipeline,
            OVLatentConsistencyModelImg2ImgPipeline,
            OVLatentConsistencyModelPipeline,
            OVPipelineForImage2Image,
            OVPipelineForInpainting,
            OVPipelineForText2Image,
+            OVStableDiffusion3Img2ImgPipeline,
+            OVStableDiffusion3InpaintPipeline,
+            OVStableDiffusion3Pipeline,
            OVStableDiffusionImg2ImgPipeline,
            OVStableDiffusionInpaintPipeline,
            OVStableDiffusionPipeline,
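With the exports above in place, `OVFluxPipeline` becomes importable from `optimum.intel` alongside the other OpenVINO diffusion pipelines. A hedged usage sketch follows; the checkpoint id and generation arguments are illustrative and not part of this commit.

```python
from optimum.intel import OVFluxPipeline

# export=True converts the diffusers checkpoint to OpenVINO IR on the fly;
# the model id below is only an example of a Flux checkpoint.
pipeline = OVFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", export=True)
image = pipeline("A watercolor lighthouse at dawn", num_inference_steps=4).images[0]
image.save("lighthouse.png")
```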
6 changes: 2 additions & 4 deletions optimum/intel/openvino/modeling_diffusion.py
@@ -565,17 +565,15 @@ def to(self, *args, device: Optional[str] = None, dtype: Optional[torch.dtype] =
 
     @property
     def height(self) -> int:
-        # flux transformer does not preserve info about height/width, they are knwon in vae_decoder
-        model = self.unet.model if self.unet is not None else self.vae.decoder.model
+        model = self.vae.decoder.model
         height = model.inputs[0].get_partial_shape()[2]
         if height.is_dynamic:
             return -1
         return height.get_length() * self.vae_scale_factor
 
     @property
     def width(self) -> int:
-        # flux transformer does not preserve info about height/width, they are known in vae_decoder
-        model = self.unet.model if self.unet is not None else self.vae.decoder.model
+        model = self.vae.decoder.model
         width = model.inputs[0].get_partial_shape()[3]
         if width.is_dynamic:
             return -1
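The simplified `height` and `width` properties now always read the static latent shape from the VAE decoder's first input and scale it back to pixel space, returning -1 when the dimension is dynamic. A standalone sketch of that logic follows; the function name and the default scale factor of 8 are assumptions for illustration.

```python
# `decoder_model` is assumed to be an openvino.Model whose first input is the
# latent tensor in NCHW layout, as in the diff above.
def static_pixel_size(decoder_model, vae_scale_factor: int = 8) -> tuple:
    shape = decoder_model.inputs[0].get_partial_shape()
    latent_height, latent_width = shape[2], shape[3]
    height = -1 if latent_height.is_dynamic else latent_height.get_length() * vae_scale_factor
    width = -1 if latent_width.is_dynamic else latent_width.get_length() * vae_scale_factor
    return height, width
```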
