Commit: Fix typos
mattdangerw committed Aug 21, 2023
1 parent bb510bd commit 8c3035e
Showing 30 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion keras_nlp/layers/modeling/masked_lm_head.py
@@ -31,7 +31,7 @@ class MaskedLMHead(keras.layers.Layer):
predict with shape `(batch_size, masks_per_sequence)`.
The token encodings should usually be the last output of an encoder model,
- and mask positions should be the interger positions you would like to
+ and mask positions should be the integer positions you would like to
predict for the MaskedLM task.
The layer will first gather the token encodings at the mask positions. These
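For orientation, the behavior this docstring describes — gather the encodings at integer mask positions, then project to vocabulary logits — can be sketched with plain TensorFlow ops. This is an illustrative sketch with made-up shapes, not the layer's actual implementation:

```python
import tensorflow as tf
from tensorflow import keras

batch_size, seq_length, hidden_dim, vocab_size = 2, 8, 16, 100

# Token encodings, e.g. the last output of an encoder model.
encodings = tf.random.normal((batch_size, seq_length, hidden_dim))
# Integer positions to predict, shape (batch_size, masks_per_sequence).
mask_positions = tf.constant([[1, 4, 6], [0, 2, 7]])

# Gather the encodings at the mask positions, then project to the vocabulary.
gathered = tf.gather(encodings, mask_positions, batch_dims=1)
logits = keras.layers.Dense(vocab_size)(gathered)
print(logits.shape)  # (2, 3, 100)
```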
2 changes: 1 addition & 1 deletion keras_nlp/layers/modeling/rotary_embedding_test.py
@@ -60,7 +60,7 @@ def test_multi_dimension_layer_output_shape(self):
inputs = keras.Input(shape=(None, seq_length, hidden_size))
outputs = embedding_layer(inputs)

- # When using muliple dimensions before sequence length, the output is
+ # When using multiple dimensions before sequence length, the output is
# expected to be the same as the input shape in all dimensions.
expected_output_shape = (None, None, seq_length, hidden_size)
self.assertEqual(expected_output_shape, outputs.shape)
2 changes: 1 addition & 1 deletion keras_nlp/layers/modeling/sine_position_encoding_test.py
@@ -64,7 +64,7 @@ def test_multi_dimension_layer_output_shape(self):
inputs = keras.Input(shape=(None, seq_length, hidden_size))
outputs = pos_encoding(inputs)

- # When using muliple dimensions before sequence length, the output is
+ # When using multiple dimensions before sequence length, the output is
# expected to be the same as the input shape in all dimensions.
expected_output_shape = (None, None, seq_length, hidden_size)
self.assertEqual(expected_output_shape, outputs.shape)
2 changes: 1 addition & 1 deletion keras_nlp/layers/preprocessing/multi_segment_packer.py
@@ -32,7 +32,7 @@ class MultiSegmentPacker(PreprocessingLayer):
"""Packs multiple sequences into a single fixed width model input.
This layer packs multiple input sequences into a single fixed width sequence
- containing start and end delimeters, forming an dense input suitable for a
+ containing start and end delimeters, forming a dense input suitable for a
classification task for BERT and BERT-like models.
Takes as input a tuple of token segments. Each tuple element should contain
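As a rough illustration of the packing this docstring describes, a BERT-style packing of two token segments into one fixed-width sequence might look like the following. The special-token ids and the segment-id convention here are assumptions for the example, not the layer's exact behavior:

```python
# Hypothetical special-token ids for a BERT-style model.
CLS_ID, SEP_ID, PAD_ID = 101, 102, 0
sequence_length = 12

segment_a = [7, 8, 9]         # tokenized sentence A
segment_b = [21, 22, 23, 24]  # tokenized sentence B

# Start delimiter, segment A, end delimiter, segment B, end delimiter, padding.
token_ids = [CLS_ID] + segment_a + [SEP_ID] + segment_b + [SEP_ID]
segment_ids = [0] * (len(segment_a) + 2) + [1] * (len(segment_b) + 1)

token_ids += [PAD_ID] * (sequence_length - len(token_ids))
segment_ids += [0] * (sequence_length - len(segment_ids))

print(token_ids)    # [101, 7, 8, 9, 102, 21, 22, 23, 24, 102, 0, 0]
print(segment_ids)  # [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]
```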
2 changes: 1 addition & 1 deletion keras_nlp/models/albert/albert_classifier.py
@@ -76,7 +76,7 @@ class AlbertClassifier(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
classifier.backbone.trainable = False
# Fit again.
classifier.fit(x=features, y=labels, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/albert/albert_masked_lm.py
@@ -71,7 +71,7 @@ class AlbertMaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/backbone.py
@@ -128,7 +128,7 @@ def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)

# If the subclass does not define from_preset, assign a wrapper so that
- # each class can have an distinct docstring.
+ # each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:

def from_preset(calling_cls, *args, **kwargs):
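The surrounding pattern — a shared `from_preset` re-wrapped in `__init_subclass__` so that each subclass exposes its own docstring — can be sketched in simplified form. This is a rough reconstruction from the visible context lines, not the actual keras_nlp code:

```python
class Base:
    @classmethod
    def from_preset(cls, preset, **kwargs):
        """Instantiate {name} from a preset."""
        ...  # shared preset-loading logic

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # If the subclass does not define from_preset, assign a wrapper so
        # that each class can have a distinct docstring.
        if "from_preset" not in cls.__dict__:

            def from_preset(calling_cls, *args, **kwargs):
                return super(cls, calling_cls).from_preset(*args, **kwargs)

            from_preset.__doc__ = Base.from_preset.__doc__.format(
                name=cls.__name__
            )
            cls.from_preset = classmethod(from_preset)


class MyBackbone(Base):
    pass


print(MyBackbone.from_preset.__doc__)  # "Instantiate MyBackbone from a preset."
```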
2 changes: 1 addition & 1 deletion keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py
@@ -209,7 +209,7 @@ def generate_preprocess(
strings for values, tokenizes and packs the input, and computes a
padding mask masking all inputs not filled in with a padded value.
- Unlike calling the the layer for training, this method does not compute
+ Unlike calling the layer for training, this method does not compute
labels and will never append a tokenizer.end_token_id to the end of
the decoder sequence (as generation is expected to continue at the end
of the inputted decoder prompt).
2 changes: 1 addition & 1 deletion keras_nlp/models/bert/bert_classifier.py
@@ -77,7 +77,7 @@ class BertClassifier(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
classifier.backbone.trainable = False
# Fit again.
classifier.fit(x=features, y=labels, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/bert/bert_masked_lm.py
@@ -70,7 +70,7 @@ class BertMaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/deberta_v3/deberta_v3_classifier.py
@@ -86,7 +86,7 @@ class DebertaV3Classifier(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
classifier.backbone.trainable = False
# Fit again.
classifier.fit(x=features, y=labels, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py
@@ -74,7 +74,7 @@ class DebertaV3MaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/distil_bert/distil_bert_classifier.py
@@ -88,7 +88,7 @@ class DistilBertClassifier(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
classifier.backbone.trainable = False
# Fit again.
classifier.fit(x=features, y=labels, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/distil_bert/distil_bert_masked_lm.py
@@ -74,7 +74,7 @@ class DistilBertMaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/f_net/f_net_classifier.py
@@ -78,7 +78,7 @@ class FNetClassifier(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
classifier.backbone.trainable = False
# Fit again.
classifier.fit(x=features, y=labels, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/f_net/f_net_masked_lm.py
@@ -69,7 +69,7 @@ class FNetMaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/generative_task.py
@@ -232,7 +232,7 @@ def generate(
`preprocessor` is attached to the model, `inputs` should match
the structure expected by the `preprocessor` layer. If a
`preprocessor` is not attached, `inputs` should match the
- structure expected the the `backbone` model.
+ structure expected the `backbone` model.
max_length: Optional. int. The max length of the generated sequence.
Will default to the max configured `sequence_length` of the
`preprocessor`. If `preprocessor` is `None`, `inputs` should be
2 changes: 1 addition & 1 deletion keras_nlp/models/gpt2/gpt2_causal_lm.py
@@ -29,7 +29,7 @@

@keras_nlp_export("keras_nlp.models.GPT2CausalLM")
class GPT2CausalLM(GenerativeTask):
- """An end-to-end GPT2 model for causal langauge modeling.
+ """An end-to-end GPT2 model for causal language modeling.
A causal language model (LM) predicts the next token based on previous
tokens. This task setup can be used to train the model unsupervised on
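For context, end-to-end use of this task model typically looks like the following minimal sketch; it assumes the `gpt2_base_en` preset is available and can be downloaded:

```python
import keras_nlp

# Load pretrained GPT-2 weights together with the matching preprocessor.
causal_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en")

# Generate a continuation of a string prompt.
output = causal_lm.generate("The quick brown fox", max_length=50)
print(output)
```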
2 changes: 1 addition & 1 deletion keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py
@@ -139,7 +139,7 @@ def generate_preprocess(
or tensor strings, tokenizes and packs the input, and computes a padding
mask masking all inputs not filled in with a padded value.
- Unlike calling the the layer for training, this method does not compute
+ Unlike calling the layer for training, this method does not compute
labels and will never append a `tokenizer.end_token_id` to the end of
the sequence (as generation is expected to continue at the end of the
inputted prompt).
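A rough sketch of what this preprocessing path produces; the preset name and `sequence_length` are assumptions for the example, while the `token_ids`/`padding_mask` keys follow the convention visible elsewhere in this commit:

```python
import keras_nlp

preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
    "gpt2_base_en", sequence_length=16
)

# Tokenizes and packs the prompt and computes a padding mask; unlike the
# training path, it adds no labels and no end token.
x = preprocessor.generate_preprocess(["a quick brown fox"])
print(x["token_ids"].shape)     # (1, 16)
print(x["padding_mask"].shape)  # (1, 16)
```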
2 changes: 1 addition & 1 deletion keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm.py
@@ -26,7 +26,7 @@

@keras_nlp_export("keras_nlp.models.GPTNeoXCausalLM")
class GPTNeoXCausalLM(GenerativeTask):
- """An end-to-end GPTNeoX model for causal langauge modeling.
+ """An end-to-end GPTNeoX model for causal language modeling.
A causal language model (LM) predicts the next token based on previous
tokens. This task setup can be used to train the model unsupervised on
@@ -107,7 +107,7 @@ def generate_preprocess(
or tensor strings, tokenizes and packs the input, and computes a padding
mask masking all inputs not filled in with a padded value.
- Unlike calling the the layer for training, this method does not compute
+ Unlike calling the layer for training, this method does not compute
labels and will never append a `tokenizer.end_token_id` to the end of
the sequence (as generation is expected to continue at the end of the
inputted prompt).
2 changes: 1 addition & 1 deletion keras_nlp/models/opt/opt_causal_lm.py
@@ -29,7 +29,7 @@

@keras_nlp_export("keras_nlp.models.OPTCausalLM")
class OPTCausalLM(GenerativeTask):
- """An end-to-end OPT model for causal langauge modeling.
+ """An end-to-end OPT model for causal language modeling.
A causal language model (LM) predicts the next token based on previous
tokens. This task setup can be used to train the model unsupervised on
4 changes: 2 additions & 2 deletions keras_nlp/models/opt/opt_causal_lm_preprocessor.py
@@ -140,7 +140,7 @@ def generate_preprocess(
or tensor strings, tokenizes and packs the input, and computes a padding
mask masking all inputs not filled in with a padded value.
- Unlike calling the the layer for training, this method does not compute
+ Unlike calling the layer for training, this method does not compute
labels and will never append a `tokenizer.end_token_id` to the end of
the sequence (as generation is expected to continue at the end of the
inputted prompt).
@@ -162,7 +162,7 @@ def generate_postprocess(
"""Covert integer token output to strings for generation.
This method reverses `generate_preprocess()`, by first removing all
- padding and start/end tokens, and then converting the interger sequence
+ padding and start/end tokens, and then converting the integer sequence
back to a string.
"""
token_ids, padding_mask = x["token_ids"], x["padding_mask"]
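Continuing the preprocessor sketch from the GPT-2 section above, the reverse direction that `generate_postprocess` describes would roughly be (the printed output is approximate):

```python
# x is the {"token_ids", "padding_mask"} dict produced by generate_preprocess.
outputs = preprocessor.generate_postprocess(x)
print(outputs)  # e.g. ["a quick brown fox"]
```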
2 changes: 1 addition & 1 deletion keras_nlp/models/preprocessor.py
@@ -125,7 +125,7 @@ def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)

# If the subclass does not define from_preset, assign a wrapper so that
- # each class can have an distinct docstring.
+ # each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:

def from_preset(calling_cls, *args, **kwargs):
2 changes: 1 addition & 1 deletion keras_nlp/models/roberta/roberta_classifier.py
@@ -78,7 +78,7 @@ class RobertaClassifier(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
classifier.backbone.trainable = False
# Fit again.
classifier.fit(x=features, y=labels, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/roberta/roberta_masked_lm.py
@@ -72,7 +72,7 @@ class RobertaMaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm.py
@@ -75,7 +75,7 @@ class XLMRobertaMaskedLM(Task):
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
- # Access backbone programatically (e.g., to change `trainable`).
+ # Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
2 changes: 1 addition & 1 deletion keras_nlp/samplers/contrastive_sampler.py
@@ -185,7 +185,7 @@ def body(prompt, cache, index, logits, hidden_states):
(1 - self.alpha) * next_token_probabilities
- self.alpha * max_similarity_scores
)
- # Unflatten varibles to shape [batch_size, self.k, ...] for
+ # Unflatten variables to shape [batch_size, self.k, ...] for
# gather purpose.
unflat_score = unflatten_beams(accumulated_scores)
unflat_prompt = unflatten_beams(prompt_beams)
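The scoring rule visible in this hunk — score = (1 - alpha) * next-token probability - alpha * maximum similarity to previous hidden states — can be computed in isolation as follows; shapes and values are illustrative only, not the sampler's actual code:

```python
import numpy as np

batch_size, k = 2, 3  # k candidate next tokens per sequence
alpha = 0.6

# Probability of each of the k candidate next tokens.
next_token_probabilities = np.random.rand(batch_size, k)
# Maximum similarity of each candidate's hidden state to the hidden states
# of previously generated tokens (the degeneration penalty).
max_similarity_scores = np.random.rand(batch_size, k)

accumulated_scores = (
    (1 - alpha) * next_token_probabilities
    - alpha * max_similarity_scores
)

# Keep the candidate with the highest contrastive score for each sequence.
best_candidate = np.argmax(accumulated_scores, axis=-1)
print(best_candidate.shape)  # (2,)
```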
2 changes: 1 addition & 1 deletion keras_nlp/tokenizers/byte_pair_tokenizer.py
@@ -660,7 +660,7 @@ def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)

# If the subclass does not define from_preset, assign a wrapper so that
- # each class can have an distinct docstring.
+ # each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:

def from_preset(calling_cls, *args, **kwargs):
2 changes: 1 addition & 1 deletion keras_nlp/tokenizers/sentence_piece_tokenizer.py
@@ -282,7 +282,7 @@ def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)

# If the subclass does not define from_preset, assign a wrapper so that
- # each class can have an distinct docstring.
+ # each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:

def from_preset(calling_cls, *args, **kwargs):
