Commit

Correct spelling of PARSeq
gowthamkpr committed Jan 14, 2025
1 parent 055bddc commit 7e5794e
Showing 5 changed files with 59 additions and 25 deletions.
2 changes: 1 addition & 1 deletion keras_hub/api/layers/__init__.py
@@ -53,7 +53,7 @@
PaliGemmaImageConverter,
)
from keras_hub.src.models.parseq.parseq_image_converter import (
ParseQImageConverter,
PARSeqImageConverter,
)
from keras_hub.src.models.resnet.resnet_image_converter import (
ResNetImageConverter,
6 changes: 3 additions & 3 deletions keras_hub/api/models/__init__.py
@@ -250,9 +250,9 @@
from keras_hub.src.models.pali_gemma.pali_gemma_tokenizer import (
PaliGemmaTokenizer,
)
from keras_hub.src.models.parseq.parseq_backbone import ParseQBackbone
from keras_hub.src.models.parseq.parseq_ocr import ParseQOCR
from keras_hub.src.models.parseq.parseq_preprocessor import ParseQPreprocessor
from keras_hub.src.models.parseq.parseq_backbone import PARSeqBackbone
from keras_hub.src.models.parseq.parseq_ocr import PARSeqOCR
from keras_hub.src.models.parseq.parseq_preprocessor import PARSeqPreprocessor
from keras_hub.src.models.phi3.phi3_backbone import Phi3Backbone
from keras_hub.src.models.phi3.phi3_causal_lm import Phi3CausalLM
from keras_hub.src.models.phi3.phi3_causal_lm_preprocessor import (
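For readers following along, here is a minimal usage sketch of the corrected spellings, assuming a keras_hub checkout that already contains this commit. The import paths are the source-level modules touched by the diff; whether they are also re-exported under shorter public aliases depends on the package's API generation step, which is not part of this change.

# Sketch only: assumes a keras_hub environment built from a revision that
# includes this rename. Paths match the modules edited in this commit.
from keras_hub.src.models.parseq.parseq_backbone import PARSeqBackbone
from keras_hub.src.models.parseq.parseq_ocr import PARSeqOCR
from keras_hub.src.models.parseq.parseq_preprocessor import PARSeqPreprocessor
from keras_hub.src.models.parseq.parseq_image_converter import PARSeqImageConverter

# The old spellings (ParseQBackbone, ParseQOCR, ParseQPreprocessor,
# ParseQImageConverter) no longer exist after this commit and would raise ImportError.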
6 changes: 4 additions & 2 deletions keras_hub/src/models/parseq/parseq_backbone.py
@@ -46,7 +46,7 @@ def __init__(
drop_rate,
attn_drop_rate,
drop_path_rate,
name="parseq_vit"
name="parseq_vit",
)

# Configure the decoder layer parameters
@@ -65,7 +65,9 @@
)

# Embedding layer for input tokens
text_embed = TokenEmbedding(out_channels, embed_dim, name="parseq_embed")
text_embed = TokenEmbedding(
out_channels, embed_dim, name="parseq_embed"
)

dropout_layer = layers.Dropout(dropout, name="parseq_dropout")
# Output head to project decoder outputs to token probabilities
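For orientation, the hunk above wires a token embedding, a dropout layer, and an output head into the decoder path. A rough standalone sketch of that embed-dropout-project pattern follows; all sizes are illustrative assumptions, and the real backbone runs the embedded tokens through its decoder against the ViT features before the head.

import keras
from keras import layers

# Illustrative numbers only; the real values come from the backbone config.
out_channels = 97   # assumed character-vocabulary size
embed_dim = 384     # assumed decoder width
max_len = 25        # assumed maximum label length

token_ids = keras.Input(shape=(max_len,), dtype="int32")

# Embed target tokens and regularize; the real model decodes these against
# the encoder memory before projecting them to per-character logits.
x = layers.Embedding(out_channels, embed_dim)(token_ids)  # stand-in for TokenEmbedding
x = layers.Dropout(0.1)(x)
logits = layers.Dense(out_channels)(x)  # output head over the vocabulary

sketch = keras.Model(token_ids, logits)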
36 changes: 27 additions & 9 deletions keras_hub/src/models/parseq/parseq_head.py
@@ -15,18 +15,34 @@ def __init__(
):
super(DecoderLayer, self).__init__()
self.self_attn = layers.MultiHeadAttention(
num_heads=nhead, key_dim=d_model//nhead, dropout=dropout, name=f"{name}_sattn"
num_heads=nhead,
key_dim=d_model // nhead,
dropout=dropout,
name=f"{name}_sattn",
)
self.cross_attn = layers.MultiHeadAttention(
num_heads=nhead, key_dim=d_model//nhead, dropout=dropout, name=f"{name}_xattn"
num_heads=nhead,
key_dim=d_model // nhead,
dropout=dropout,
name=f"{name}_xattn",
)
self.linear1 = layers.Dense(
dim_feedforward, activation=activation, name=f"{name}_dense1"
)
self.linear1 = layers.Dense(dim_feedforward, activation=activation, name=f"{name}_dense1")
self.dropout = layers.Dropout(dropout, name=f"{name}_dropout")
self.linear2 = layers.Dense(d_model, name=f"{name}_dense2")
self.norm1 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_norm1")
self.norm2 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_norm2")
self.norm_q = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_normq")
self.norm_c = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_normc")
self.norm1 = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_norm1"
)
self.norm2 = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_norm2"
)
self.norm_q = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_normq"
)
self.norm_c = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_normc"
)
self.dropout1 = layers.Dropout(dropout, name=f"{name}_dropout1")
self.dropout2 = layers.Dropout(dropout, name=f"{name}_dropout2")
self.dropout3 = layers.Dropout(dropout, name=f"{name}_dropout3")
@@ -116,11 +132,13 @@ def __init__(
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
name=f"{name}_layer{i}"
name=f"{name}_layer{i}",
)
for i in range(num_layers)
]
self.norm = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_norm")
self.norm = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_norm"
)

def call(
self,
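The DecoderLayer hunks above only reformat the constructor, but they show the sublayers involved: self-attention, cross-attention, a two-layer feed-forward network, and several LayerNormalizations. As a point of reference, here is a generic sketch of how such sublayers are commonly wired, with residual connections around normalized inputs. It is an illustration under assumed defaults, not the repository's actual call() method, which lies outside this diff.

import keras
from keras import layers

LAYERNORM_EPSILON = 1e-5  # assumed value; the repository defines its own constant elsewhere


class DecoderLayerSketch(layers.Layer):
    """Illustrative decoder block: self-attention, cross-attention into the
    encoder memory, then a feed-forward network, each with a residual add.
    A generic sketch, not the repository's DecoderLayer."""

    def __init__(self, d_model=384, nhead=6, dim_feedforward=1536, dropout=0.1, **kwargs):
        super().__init__(**kwargs)
        self.self_attn = layers.MultiHeadAttention(
            num_heads=nhead, key_dim=d_model // nhead, dropout=dropout
        )
        self.cross_attn = layers.MultiHeadAttention(
            num_heads=nhead, key_dim=d_model // nhead, dropout=dropout
        )
        self.ffn = keras.Sequential(
            [layers.Dense(dim_feedforward, activation="gelu"), layers.Dense(d_model)]
        )
        self.norm1 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON)
        self.norm2 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON)
        self.norm3 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON)

    def call(self, query, memory, training=False):
        # Self-attention over the normalized query stream, with residual add.
        q = self.norm1(query)
        query = query + self.self_attn(q, q, training=training)
        # Cross-attention from queries into the encoder memory, with residual add.
        query = query + self.cross_attn(self.norm2(query), memory, training=training)
        # Position-wise feed-forward network, with residual add.
        return query + self.ffn(self.norm3(query), training=training)


# Example shapes: 26 query positions attending over 197 encoder tokens.
queries = keras.random.normal((2, 26, 384))
memory = keras.random.normal((2, 197, 384))
out = DecoderLayerSketch()(queries, memory)  # -> (2, 26, 384)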
34 changes: 24 additions & 10 deletions keras_hub/src/models/parseq/parseq_vit.py
@@ -61,7 +61,9 @@ def __init__(
hidden_features if hidden_features is not None else in_features
)

self.fc1 = layers.Dense(hidden_features, activation=act, name=f"{name}_dense1")
self.fc1 = layers.Dense(
hidden_features, activation=act, name=f"{name}_dense1"
)
self.drop1 = layers.Dropout(drop, name=f"{name}_dropout1")
self.fc2 = layers.Dense(out_features, name=f"{name}_dense2")
self.drop2 = layers.Dropout(drop, name=f"{name}_dropout2")
@@ -85,7 +87,13 @@ class PatchEmbed(layers.Layer):
"""

def __init__(
self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, name="patchembed", **kwargs
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
name="patchembed",
**kwargs,
):
super().__init__(**kwargs, name=name)
img_size = to_2tuple(img_size)
@@ -102,7 +110,7 @@ def __init__(
kernel_size=patch_size,
strides=patch_size,
padding="valid",
name=f"{name}_conv"
name=f"{name}_conv",
)

def call(self, x):
@@ -146,7 +154,7 @@ def __init__(
key_dim=dim // num_heads,
dropout=attn_drop,
use_bias=qkv_bias,
name=f"{name}_mha"
name=f"{name}_mha",
)
self.proj_drop = layers.Dropout(proj_drop, name=f"{name}_dropout")

@@ -174,27 +182,31 @@ def __init__(
):
super().__init__(**kwargs, name=name)

self.norm1 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_norm1")
self.norm1 = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_norm1"
)
self.attn = Attention(
dim=dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=drop,
name=f"{name}_attn"
name=f"{name}_attn",
)
self.drop_path = (
DropPath(drop_path) if drop_path > 0.0 else (lambda x: x)
)

self.norm2 = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_norm2")
self.norm2 = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_norm2"
)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act=act,
drop=drop,
name=f"{name}_mlp"
name=f"{name}_mlp",
)

def call(self, x, training=False):
@@ -264,11 +276,13 @@ def __init__(
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr_values[i],
name=f"{name}_block{i}"
name=f"{name}_block{i}",
)
self.blocks.append(block)

self.norm = layers.LayerNormalization(epsilon=LAYERNORM_EPSILON, name=f"{name}_norm")
self.norm = layers.LayerNormalization(
epsilon=LAYERNORM_EPSILON, name=f"{name}_norm"
)

# Classifier head
if class_num:
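The PatchEmbed changes above are purely cosmetic, but the layer relies on the standard ViT patchify trick visible in the hunk: a Conv2D whose kernel size and stride both equal the patch size projects each non-overlapping patch to embed_dim in a single step, after which the spatial grid is flattened into a token sequence. A minimal channels-last sketch with assumed sizes follows; PARSeq itself typically runs on small rectangular text crops (e.g. 32x128) rather than the 224x224 ViT default used here for illustration.

import keras
from keras import layers

# Assumed example sizes for illustration only.
img_h, img_w, patch, embed_dim = 224, 224, 16, 768

images = keras.Input(shape=(img_h, img_w, 3))

# One conv output per non-overlapping 16x16 patch (kernel == stride == patch size).
x = layers.Conv2D(embed_dim, kernel_size=patch, strides=patch, padding="valid")(images)

# Flatten the (H/patch, W/patch) grid into a sequence of patch tokens.
num_patches = (img_h // patch) * (img_w // patch)
tokens = layers.Reshape((num_patches, embed_dim))(x)  # (batch, 196, 768)

patchify = keras.Model(images, tokens)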
