diff --git a/site/en/community/contribute/docs_style.md b/site/en/community/contribute/docs_style.md
index eba78afa896..d4e42cb5235 100644
--- a/site/en/community/contribute/docs_style.md
+++ b/site/en/community/contribute/docs_style.md
@@ -63,7 +63,7 @@ repository like this:
* \[Basics\]\(../../guide/basics.ipynb\)
produces
[Basics](../../guide/basics.ipynb).
-This is the prefered approach because this way the links on
+This is the preferred approach because this way the links on
[tensorflow.org](https://www.tensorflow.org),
[GitHub](https://github.com/tensorflow/docs){:.external} and
[Colab](https://github.com/tensorflow/docs/tree/master/site/en/guide/basics.ipynb){:.external}
diff --git a/site/en/guide/data.ipynb b/site/en/guide/data.ipynb
index d9c8fff8982..739ef131005 100644
--- a/site/en/guide/data.ipynb
+++ b/site/en/guide/data.ipynb
@@ -1385,7 +1385,7 @@
"The simplest form of batching stacks `n` consecutive elements of a dataset into\n",
"a single element. The `Dataset.batch()` transformation does exactly this, with\n",
"the same constraints as the `tf.stack()` operator, applied to each component\n",
- "of the elements: i.e. for each component *i*, all elements must have a tensor\n",
+ "of the elements: i.e., for each component *i*, all elements must have a tensor\n",
"of the exact same shape."
]
},
diff --git a/site/en/guide/dtensor_overview.ipynb b/site/en/guide/dtensor_overview.ipynb
index 95a50f3465f..1b55ee0283f 100644
--- a/site/en/guide/dtensor_overview.ipynb
+++ b/site/en/guide/dtensor_overview.ipynb
@@ -281,7 +281,7 @@
"id": "Eyp_qOSyvieo"
},
"source": [
- "\n"
+ "\n"
]
},
{
@@ -303,7 +303,7 @@
"source": [
"For the same `mesh_2d`, the layout `Layout([\"x\", dtensor.UNSHARDED], mesh_2d)` is a layout for a rank-2 `Tensor` that is replicated across `\"y\"`, and whose first axis is sharded on mesh dimension `x`.\n",
"\n",
- "\n"
+ "\n"
]
},
{
diff --git a/site/en/guide/migrate/migrating_feature_columns.ipynb b/site/en/guide/migrate/migrating_feature_columns.ipynb
index ea12a5ef391..b2dbc5fe7c0 100644
--- a/site/en/guide/migrate/migrating_feature_columns.ipynb
+++ b/site/en/guide/migrate/migrating_feature_columns.ipynb
@@ -654,17 +654,17 @@
"source": [
"categorical_col = tf1.feature_column.categorical_column_with_identity(\n",
" 'type', num_buckets=one_hot_dims)\n",
- "# Convert index to one-hot; e.g. [2] -> [0,0,1].\n",
+ "# Convert index to one-hot; e.g., [2] -> [0,0,1].\n",
"indicator_col = tf1.feature_column.indicator_column(categorical_col)\n",
"\n",
- "# Convert strings to indices; e.g. ['small'] -> [1].\n",
+ "# Convert strings to indices; e.g., ['small'] -> [1].\n",
"vocab_col = tf1.feature_column.categorical_column_with_vocabulary_list(\n",
" 'size', vocabulary_list=vocab, num_oov_buckets=1)\n",
"# Embed the indices.\n",
"embedding_col = tf1.feature_column.embedding_column(vocab_col, embedding_dims)\n",
"\n",
"normalizer_fn = lambda x: (x - weight_mean) / math.sqrt(weight_variance)\n",
- "# Normalize the numeric inputs; e.g. [2.0] -> [0.0].\n",
+ "# Normalize the numeric inputs; e.g., [2.0] -> [0.0].\n",
"numeric_col = tf1.feature_column.numeric_column(\n",
" 'weight', normalizer_fn=normalizer_fn)\n",
"\n",
@@ -727,12 +727,12 @@
" 'size': tf.keras.Input(shape=(), dtype='string'),\n",
" 'weight': tf.keras.Input(shape=(), dtype='float32'),\n",
"}\n",
- "# Convert index to one-hot; e.g. [2] -> [0,0,1].\n",
+ "# Convert index to one-hot; e.g., [2] -> [0,0,1].\n",
"type_output = tf.keras.layers.CategoryEncoding(\n",
" one_hot_dims, output_mode='one_hot')(inputs['type'])\n",
- "# Convert size strings to indices; e.g. ['small'] -> [1].\n",
+ "# Convert size strings to indices; e.g., ['small'] -> [1].\n",
"size_output = tf.keras.layers.StringLookup(vocabulary=vocab)(inputs['size'])\n",
- "# Normalize the numeric inputs; e.g. [2.0] -> [0.0].\n",
+ "# Normalize the numeric inputs; e.g., [2.0] -> [0.0].\n",
"weight_output = tf.keras.layers.Normalization(\n",
" axis=None, mean=weight_mean, variance=weight_variance)(inputs['weight'])\n",
"outputs = {\n",
diff --git a/site/en/guide/migrate/migration_debugging.ipynb b/site/en/guide/migrate/migration_debugging.ipynb
index 86c86680dc9..25cb7f9065f 100644
--- a/site/en/guide/migrate/migration_debugging.ipynb
+++ b/site/en/guide/migrate/migration_debugging.ipynb
@@ -128,7 +128,7 @@
"\n",
" a. Check training behaviors with TensorBoard\n",
"\n",
- " * use simple optimizers e.g. SGD and simple distribution strategies e.g.\n",
+ " * use simple optimizers e.g., SGD and simple distribution strategies e.g.\n",
" `tf.distribute.OneDeviceStrategy` first\n",
" * training metrics\n",
" * evaluation metrics\n",
diff --git a/site/en/guide/profiler.md b/site/en/guide/profiler.md
index 1cd19c109fe..e92d1b9eae4 100644
--- a/site/en/guide/profiler.md
+++ b/site/en/guide/profiler.md
@@ -694,7 +694,7 @@ first few batches to avoid inaccuracies due to initialization overhead.
An example for profiling multiple workers:
```python
- # E.g. your worker IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you
+ # E.g., your worker IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you
# would like to profile for a duration of 2 seconds.
tf.profiler.experimental.client.trace(
'grpc://10.0.0.2:8466,grpc://10.0.0.3:8466,grpc://10.0.0.4:8466',
@@ -845,7 +845,7 @@ more efficient by casting to different data types after applying
spatial transformations, such as flipping, cropping, rotating, etc.
Note: Some ops like `tf.image.resize` transparently change the `dtype` to
-`fp32`. Make sure you normalize your data to lie between `0` and `1` if its not
+`fp32`. Make sure you normalize your data to lie between `0` and `1` if it's not
done automatically. Skipping this step could lead to `NaN` errors if you have
enabled [AMP](https://developer.nvidia.com/automatic-mixed-precision).
diff --git a/site/en/guide/random_numbers.ipynb b/site/en/guide/random_numbers.ipynb
index 5212a10a49a..f8b824ad906 100644
--- a/site/en/guide/random_numbers.ipynb
+++ b/site/en/guide/random_numbers.ipynb
@@ -166,7 +166,7 @@
"source": [
"See the *Algorithms* section below for more information about it.\n",
"\n",
- "Another way to create a generator is with `Generator.from_non_deterministic_state`. A generator created this way will start from a non-deterministic state, depending on e.g. time and OS."
+ "Another way to create a generator is with `Generator.from_non_deterministic_state`. A generator created this way will start from a non-deterministic state, depending on e.g., time and OS."
]
},
{
diff --git a/site/en/guide/tf_numpy_type_promotion.ipynb b/site/en/guide/tf_numpy_type_promotion.ipynb
index a9e176c5db6..703f481e5cf 100644
--- a/site/en/guide/tf_numpy_type_promotion.ipynb
+++ b/site/en/guide/tf_numpy_type_promotion.ipynb
@@ -455,7 +455,7 @@
{
"cell_type": "markdown",
"metadata": {
- "id": "7UmunnJ8Tru3"
+ "id": "7UmunnJ8True3"
},
"source": [
"**First Case**: When `tf.constant` is called with an input with no user-specified dtype."
diff --git a/site/en/hub/common_saved_model_apis/text.md b/site/en/hub/common_saved_model_apis/text.md
index 1c45b8ea026..c618b02d9f1 100644
--- a/site/en/hub/common_saved_model_apis/text.md
+++ b/site/en/hub/common_saved_model_apis/text.md
@@ -132,8 +132,8 @@ preprocessor = hub.load("path/to/preprocessor") # Must match `encoder`.
encoder_inputs = preprocessor(text_input)
encoder = hub.load("path/to/encoder")
-enocder_outputs = encoder(encoder_inputs)
-embeddings = enocder_outputs["default"]
+encoder_outputs = encoder(encoder_inputs)
+embeddings = encoder_outputs["default"]
```
Recall from the [Reusable SavedModel API](../reusable_saved_models.md) that
@@ -304,8 +304,8 @@ provisions from the [Reusable SavedModel API](../reusable_saved_models.md).
#### Usage synopsis
```python
-enocder = hub.load("path/to/encoder")
-enocder_outputs = encoder(encoder_inputs)
+encoder = hub.load("path/to/encoder")
+encoder_outputs = encoder(encoder_inputs)
```
or equivalently in Keras:
diff --git a/site/en/hub/tf2_saved_model.md b/site/en/hub/tf2_saved_model.md
index 7a7220d0a2e..641f9b3517b 100644
--- a/site/en/hub/tf2_saved_model.md
+++ b/site/en/hub/tf2_saved_model.md
@@ -82,7 +82,7 @@ and uncompressed SavedModels. For details, see [Caching](caching.md).
SavedModels can be loaded from a specified `handle`, where the `handle` is a
filesystem path, valid TFhub.dev model URL (e.g. "https://tfhub.dev/...").
Kaggle Models URLs mirror TFhub.dev handles in accordance with our Terms and the
-license associated with the model assets, e.g. "https://www.kaggle.com/...".
+license associated with the model assets, e.g., "https://www.kaggle.com/...".
Handles from Kaggle Models are equivalent to their corresponding TFhub.dev
handle.
diff --git a/site/en/hub/tutorials/action_recognition_with_tf_hub.ipynb b/site/en/hub/tutorials/action_recognition_with_tf_hub.ipynb
index b4a1e439621..3f586991ba9 100644
--- a/site/en/hub/tutorials/action_recognition_with_tf_hub.ipynb
+++ b/site/en/hub/tutorials/action_recognition_with_tf_hub.ipynb
@@ -184,7 +184,7 @@
" return list(_VIDEO_LIST)\n",
"\n",
"def fetch_ucf_video(video):\n",
- " \"\"\"Fetchs a video and cache into local filesystem.\"\"\"\n",
+ " \"\"\"Fetches a video and cache into local filesystem.\"\"\"\n",
" cache_path = os.path.join(_CACHE_DIR, video)\n",
" if not os.path.exists(cache_path):\n",
" urlpath = request.urljoin(UCF_ROOT, video)\n",
diff --git a/site/en/hub/tutorials/cropnet_cassava.ipynb b/site/en/hub/tutorials/cropnet_cassava.ipynb
index 18f41c00da1..926b5395e41 100644
--- a/site/en/hub/tutorials/cropnet_cassava.ipynb
+++ b/site/en/hub/tutorials/cropnet_cassava.ipynb
@@ -199,7 +199,7 @@
"id": "QT3XWAtR6BRy"
},
"source": [
- "The *cassava* dataset has images of cassava leaves with 4 distinct diseases as well as healthy cassava leaves. The model can predict all of these classes as well as sixth class for \"unknown\" when the model is not confident in it's prediction."
+ "The *cassava* dataset has images of cassava leaves with 4 distinct diseases as well as healthy cassava leaves. The model can predict all of these classes as well as sixth class for \"unknown\" when the model is not confident in its prediction."
]
},
{
diff --git a/site/en/hub/tutorials/cross_lingual_similarity_with_tf_hub_multilingual_universal_encoder.ipynb b/site/en/hub/tutorials/cross_lingual_similarity_with_tf_hub_multilingual_universal_encoder.ipynb
index 31fc037dfe7..920d197811e 100644
--- a/site/en/hub/tutorials/cross_lingual_similarity_with_tf_hub_multilingual_universal_encoder.ipynb
+++ b/site/en/hub/tutorials/cross_lingual_similarity_with_tf_hub_multilingual_universal_encoder.ipynb
@@ -271,7 +271,7 @@
"spanish_sentences = ['perro', 'Los cachorros son agradables.', 'Disfruto de dar largos paseos por la playa con mi perro.']\n",
"\n",
"# Multilingual example\n",
- "multilingual_example = [\"Willkommen zu einfachen, aber\", \"verrassend krachtige\", \"multilingüe\", \"compréhension du langage naturel\", \"модели.\", \"大家是什么意思\" , \"보다 중요한\", \".اللغة التي يتحدثونها\"]\n",
+ "multilingual_example = [\"Willkommen zu einfachen, aber\", \"verrassend krachtige\", \"multilingüe\", \"compréhension du language naturel\", \"модели.\", \"大家是什么意思\" , \"보다 중요한\", \".اللغة التي يتحدثونها\"]\n",
"multilingual_example_in_en = [\"Welcome to simple yet\", \"surprisingly powerful\", \"multilingual\", \"natural language understanding\", \"models.\", \"What people mean\", \"matters more than\", \"the language they speak.\"]\n"
]
},
@@ -4174,7 +4174,7 @@
"id": "Dxu66S8wJIG9"
},
"source": [
- "### Semantic-search crosss-lingual capabilities\n",
+ "### Semantic-search cross-lingual capabilities\n",
"\n",
"In this section we show how to retrieve sentences related to a set of sample English sentences. Things to try:\n",
"\n",
diff --git a/site/en/hub/tutorials/image_enhancing.ipynb b/site/en/hub/tutorials/image_enhancing.ipynb
index 4c9496b79ae..3710ebd6d66 100644
--- a/site/en/hub/tutorials/image_enhancing.ipynb
+++ b/site/en/hub/tutorials/image_enhancing.ipynb
@@ -346,7 +346,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "id": "r_dautO6qbTV"
+ "id": "r_defaultO6qbTV"
},
"outputs": [],
"source": [
diff --git a/site/en/hub/tutorials/image_feature_vector.ipynb b/site/en/hub/tutorials/image_feature_vector.ipynb
index 29ac0c97ddd..b5283c45b3d 100644
--- a/site/en/hub/tutorials/image_feature_vector.ipynb
+++ b/site/en/hub/tutorials/image_feature_vector.ipynb
@@ -357,7 +357,7 @@
"source": [
"## Train the network\n",
"\n",
- "Now that our model is built, let's train it and see how it perfoms on our test set."
+ "Now that our model is built, let's train it and see how it performs on our test set."
]
},
{
diff --git a/site/en/hub/tutorials/movenet.ipynb b/site/en/hub/tutorials/movenet.ipynb
index 2b6ffc6eb54..f7955a5253b 100644
--- a/site/en/hub/tutorials/movenet.ipynb
+++ b/site/en/hub/tutorials/movenet.ipynb
@@ -450,7 +450,7 @@
"id": "ymTVR2I9x22I"
},
"source": [
- "This session demonstrates the minumum working example of running the model on a **single image** to predict the 17 human keypoints."
+ "This session demonstrates the minimum working example of running the model on a **single image** to predict the 17 human keypoints."
]
},
{
@@ -697,7 +697,7 @@
" return output_image\n",
"\n",
"def run_inference(movenet, image, crop_region, crop_size):\n",
- " \"\"\"Runs model inferece on the cropped region.\n",
+ " \"\"\"Runs model inference on the cropped region.\n",
"\n",
" The function runs the model inference on the cropped region and updates the\n",
" model output to the original image coordinate system.\n",
diff --git a/site/en/hub/tutorials/movinet.ipynb b/site/en/hub/tutorials/movinet.ipynb
index 61609dbf72a..24600256cf9 100644
--- a/site/en/hub/tutorials/movinet.ipynb
+++ b/site/en/hub/tutorials/movinet.ipynb
@@ -890,7 +890,7 @@
" steps = video.shape[0]\n",
" # estimate duration of the video (in seconds)\n",
" duration = steps / video_fps\n",
- " # estiamte top_k probabilities and corresponding labels\n",
+ " # estimate top_k probabilities and corresponding labels\n",
" top_probs, top_labels, _ = get_top_k_streaming_labels(probs, k=top_k)\n",
"\n",
" images = []\n",
@@ -950,7 +950,7 @@
" logits, states = model({**states, 'image': image})\n",
" all_logits.append(logits)\n",
"\n",
- "# concatinating all the logits\n",
+ "# concatenating all the logits\n",
"logits = tf.concat(all_logits, 0)\n",
"# estimating probabilities\n",
"probs = tf.nn.softmax(logits, axis=-1)"
diff --git a/site/en/hub/tutorials/senteval_for_universal_sentence_encoder_cmlm.ipynb b/site/en/hub/tutorials/senteval_for_universal_sentence_encoder_cmlm.ipynb
index b152d3deee8..c33dce64c92 100644
--- a/site/en/hub/tutorials/senteval_for_universal_sentence_encoder_cmlm.ipynb
+++ b/site/en/hub/tutorials/senteval_for_universal_sentence_encoder_cmlm.ipynb
@@ -117,7 +117,7 @@
"id": "7a2ohPn8vMe2"
},
"source": [
- "#Execute a SentEval evaulation task\n",
+ "#Execute a SentEval evaluation task\n",
"The following code block executes a SentEval task and output the results, choose one of the following tasks to evaluate the USE CMLM model:\n",
"\n",
"```\n",
diff --git a/site/en/hub/tutorials/spice.ipynb b/site/en/hub/tutorials/spice.ipynb
index b58d07e46da..9ff6cd3bd62 100644
--- a/site/en/hub/tutorials/spice.ipynb
+++ b/site/en/hub/tutorials/spice.ipynb
@@ -658,7 +658,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "id": "eMUTI4L52ZHA"
+ "id": "eMULTI4L52ZHA"
},
"outputs": [],
"source": [
diff --git a/site/en/hub/tutorials/tf2_object_detection.ipynb b/site/en/hub/tutorials/tf2_object_detection.ipynb
index 38b162068d9..d06ad401824 100644
--- a/site/en/hub/tutorials/tf2_object_detection.ipynb
+++ b/site/en/hub/tutorials/tf2_object_detection.ipynb
@@ -291,7 +291,7 @@
"id": "yX3pb_pXDjYA"
},
"source": [
- "Intalling the Object Detection API"
+ "Installing the Object Detection API"
]
},
{
@@ -554,7 +554,7 @@
"\n",
"Among the available object detection models there's Mask R-CNN and the output of this model allows instance segmentation.\n",
"\n",
- "To visualize it we will use the same method we did before but adding an aditional parameter: `instance_masks=output_dict.get('detection_masks_reframed', None)`\n"
+ "To visualize it we will use the same method we did before but adding an additional parameter: `instance_masks=output_dict.get('detection_masks_reframed', None)`\n"
]
},
{
diff --git a/site/en/hub/tutorials/tf_hub_generative_image_module.ipynb b/site/en/hub/tutorials/tf_hub_generative_image_module.ipynb
index 4669f3b2dc3..4937bc2eb22 100644
--- a/site/en/hub/tutorials/tf_hub_generative_image_module.ipynb
+++ b/site/en/hub/tutorials/tf_hub_generative_image_module.ipynb
@@ -421,7 +421,7 @@
"If image is from the module space, the descent is quick and converges to a reasonable sample. Try out descending to an image that is **not from the module space**. The descent will only converge if the image is reasonably close to the space of training images.\n",
"\n",
"How to make it descend faster and to a more realistic image? One can try:\n",
- "* using different loss on the image difference, e.g. quadratic,\n",
+ "* using different loss on the image difference, e.g., quadratic,\n",
"* using different regularizer on the latent vector,\n",
"* initializing from a random vector in multiple runs,\n",
"* etc.\n"
diff --git a/site/en/install/source_windows.md b/site/en/install/source_windows.md
index 9cf33d0458b..758e5dbea45 100644
--- a/site/en/install/source_windows.md
+++ b/site/en/install/source_windows.md
@@ -95,7 +95,7 @@ a release branch that is known to work.
## Optional: Environmental Variable Set Up
Run following commands before running build command to avoid issue with package creation:
-(If the below commands were set up while installing the packages, please ignore them). Run `set` check if all the paths were set correctly, run `echo %Environmental Variable%` e.g. `echo %BAZEL_VC%` to check path set up for a specific Environmental Variable
+(If the below commands were set up while installing the packages, please ignore them.) Run `set` to check if all the paths were set correctly, run `echo %Environmental Variable%`, e.g., `echo %BAZEL_VC%`, to check the path set for a specific environment variable.
Python path set up issue [tensorflow:issue#59943](https://github.com/tensorflow/tensorflow/issues/59943),[tensorflow:issue#9436](https://github.com/tensorflow/tensorflow/issues/9436),[tensorflow:issue#60083](https://github.com/tensorflow/tensorflow/issues/60083)
@@ -257,7 +257,7 @@ your platform. Use `pip3 install` to install the package, for example:
pip3 install C:/tmp/tensorflow_pkg/tensorflow-version-tags.whl
-e.g. pip3 install C:/tmp/tensorflow_pkg/tensorflow-2.12.0-cp310-cp310-win_amd64.whl
+e.g., pip3 install C:/tmp/tensorflow_pkg/tensorflow-2.12.0-cp310-cp310-win_amd64.whl
Success: TensorFlow is now installed.
diff --git a/site/en/r1/guide/datasets.md b/site/en/r1/guide/datasets.md
index b1ed1b6e113..d7c38bf2f92 100644
--- a/site/en/r1/guide/datasets.md
+++ b/site/en/r1/guide/datasets.md
@@ -437,7 +437,7 @@ dataset = dataset.batch(32)
iterator = dataset.make_initializable_iterator()
# You can feed the initializer with the appropriate filenames for the current
-# phase of execution, e.g. training vs. validation.
+# phase of execution, e.g., training vs. validation.
# Initialize `iterator` with training data.
training_filenames = ["/var/data/file1.tfrecord", "/var/data/file2.tfrecord"]
@@ -639,7 +639,7 @@ TODO(mrry): Add this section.
The simplest form of batching stacks `n` consecutive elements of a dataset into
a single element. The `Dataset.batch()` transformation does exactly this, with
the same constraints as the `tf.stack()` operator, applied to each component
-of the elements: i.e. for each component *i*, all elements must have a tensor
+of the elements: i.e., for each component *i*, all elements must have a tensor
of the exact same shape.
```python
diff --git a/site/en/r1/guide/distribute_strategy.ipynb b/site/en/r1/guide/distribute_strategy.ipynb
index 79d6293eba7..3c0b453a278 100644
--- a/site/en/r1/guide/distribute_strategy.ipynb
+++ b/site/en/r1/guide/distribute_strategy.ipynb
@@ -607,7 +607,7 @@
},
"source": [
"## Using `tf.distribute.Strategy` with custom training loops\n",
- "As you've seen, using `tf.distrbute.Strategy` with high level APIs is only a couple lines of code change. With a little more effort, `tf.distrbute.Strategy` can also be used by other users who are not using these frameworks.\n",
+ "As you've seen, using `tf.distribute.Strategy` with high level APIs is only a couple lines of code change. With a little more effort, `tf.distribute.Strategy` can also be used by other users who are not using these frameworks.\n",
"\n",
"TensorFlow is used for a wide variety of use cases and some users (such as researchers) require more flexibility and control over their training loops. This makes it hard for them to use the high level frameworks such as Estimator or Keras. For instance, someone using a GAN may want to take a different number of generator or discriminator steps each round. Similarly, the high level frameworks are not very suitable for Reinforcement Learning training. So these users will usually write their own training loops.\n",
"\n",
diff --git a/site/en/r1/guide/graph_viz.md b/site/en/r1/guide/graph_viz.md
index 1965378e03e..1e3780e7928 100644
--- a/site/en/r1/guide/graph_viz.md
+++ b/site/en/r1/guide/graph_viz.md
@@ -251,7 +251,7 @@ is a snippet from the train and test section of a modification of the
[Estimators MNIST tutorial](../tutorials/estimators/cnn.md), in which we have
recorded summaries and runtime statistics. See the
-[Tensorboard](https://tensorflow.org/tensorboard)
+[TensorBoard](https://tensorflow.org/tensorboard)
for details on how to record summaries. Full source is
[here](https://github.com/tensorflow/tensorflow/tree/r1.15/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py).
diff --git a/site/en/r1/guide/performance/overview.md b/site/en/r1/guide/performance/overview.md
index af74f0f28c6..461fa4feb58 100644
--- a/site/en/r1/guide/performance/overview.md
+++ b/site/en/r1/guide/performance/overview.md
@@ -122,7 +122,7 @@ tf.Session(config=config)
Intel® has added optimizations to TensorFlow for Intel® Xeon® and Intel® Xeon
Phi™ through the use of the Intel® Math Kernel Library for Deep Neural Networks
(Intel® MKL-DNN) optimized primitives. The optimizations also provide speedups
-for the consumer line of processors, e.g. i5 and i7 Intel processors. The Intel
+for the consumer line of processors, e.g., i5 and i7 Intel processors. The Intel
published paper
[TensorFlow* Optimizations on Modern Intel® Architecture](https://software.intel.com/en-us/articles/tensorflow-optimizations-on-modern-intel-architecture)
contains additional details on the implementation.
@@ -255,7 +255,7 @@ bazel build -c opt --copt=-march="broadwell" --config=cuda //tensorflow/tools/pi
  a docker container, the data is not cached and the penalty is paid each time
  TensorFlow starts. The best practice is to include the
  [compute capabilities](http://developer.nvidia.com/cuda-gpus)
- of the GPUs that will be used, e.g. P100: 6.0, Titan X (Pascal): 6.1,
+ of the GPUs that will be used, e.g., P100: 6.0, Titan X (Pascal): 6.1,
  Titan X (Maxwell): 5.2, and K80: 3.7.
* Use a version of `gcc` that supports all of the optimizations of the target
  CPU. The recommended minimum gcc version is 4.8.3. On macOS, upgrade to the
diff --git a/site/en/r1/tutorials/distribute/keras.ipynb b/site/en/r1/tutorials/distribute/keras.ipynb
index 059b8c2d66f..14e8bf739a9 100644
--- a/site/en/r1/tutorials/distribute/keras.ipynb
+++ b/site/en/r1/tutorials/distribute/keras.ipynb
@@ -86,7 +86,7 @@
"Essentially, it copies all of the model's variables to each processor.\n",
"Then, it uses [all-reduce](http://mpitutorial.com/tutorials/mpi-reduce-and-allreduce/) to combine the gradients from all processors and applies the combined value to all copies of the model.\n",
"\n",
- "`MirroredStategy` is one of several distribution strategy available in TensorFlow core. You can read about more strategies at [distribution strategy guide](../../guide/distribute_strategy.ipynb).\n"
+ "`MirroredStrategy` is one of several distribution strategies available in TensorFlow core. You can read about more strategies at [distribution strategy guide](../../guide/distribute_strategy.ipynb).\n"
]
},
{
diff --git a/site/en/r1/tutorials/images/deep_cnn.md b/site/en/r1/tutorials/images/deep_cnn.md
index 00a914d8976..ef259516952 100644
--- a/site/en/r1/tutorials/images/deep_cnn.md
+++ b/site/en/r1/tutorials/images/deep_cnn.md
@@ -108,7 +108,7 @@ reusable by constructing the graph with the following modules:
operations that read and preprocess CIFAR images for evaluation and training,
respectively.
1. [**Model prediction:**](#model-prediction) `inference()`
-adds operations that perform inference, i.e. classification, on supplied images.
+adds operations that perform inference, i.e., classification, on supplied images.
1. [**Model training:**](#model-training) `loss()` and `train()` add operations
that compute the loss, gradients, variable updates and visualization summaries.
@@ -405,7 +405,7 @@ a "tower". We must set two attributes for each tower:
* A unique name for all operations within a tower. `tf.name_scope` provides
this unique name by prepending a scope. For instance, all operations in
-the first tower are prepended with `tower_0`, e.g. `tower_0/conv1/Conv2D`.
+the first tower are prepended with `tower_0`, e.g., `tower_0/conv1/Conv2D`.
* A preferred hardware device to run the operation within a tower.
`tf.device` specifies this. For
diff --git a/site/en/r1/tutorials/images/image_recognition.md b/site/en/r1/tutorials/images/image_recognition.md
index 0be884de403..2cbf9eee378 100644
--- a/site/en/r1/tutorials/images/image_recognition.md
+++ b/site/en/r1/tutorials/images/image_recognition.md
@@ -140,7 +140,7 @@ score of 0.8.
-Next, try it out on your own images by supplying the --image= argument, e.g.
+Next, try it out on your own images by supplying the --image= argument, e.g.,
```bash
bazel-bin/tensorflow/examples/label_image/label_image --image=my_image.png
diff --git a/site/en/r1/tutorials/representation/unicode.ipynb b/site/en/r1/tutorials/representation/unicode.ipynb
index 98aaacff5b9..a128724d31e 100644
--- a/site/en/r1/tutorials/representation/unicode.ipynb
+++ b/site/en/r1/tutorials/representation/unicode.ipynb
@@ -425,7 +425,7 @@
"source": [
"### Character substrings\n",
"\n",
- "Similarly, the `tf.strings.substr` operation accepts the \"`unit`\" parameter, and uses it to determine what kind of offsets the \"`pos`\" and \"`len`\" paremeters contain."
+ "Similarly, the `tf.strings.substr` operation accepts the \"`unit`\" parameter, and uses it to determine what kind of offsets the \"`pos`\" and \"`len`\" parameters contain."
]
},
{
diff --git a/site/en/r1/tutorials/representation/word2vec.md b/site/en/r1/tutorials/representation/word2vec.md
index f6a27c68f3c..c76db7ab108 100644
--- a/site/en/r1/tutorials/representation/word2vec.md
+++ b/site/en/r1/tutorials/representation/word2vec.md
@@ -36,7 +36,7 @@ like to get your hands dirty with the details.
Image and audio processing systems work with rich, high-dimensional datasets
encoded as vectors of the individual raw pixel-intensities for image data, or
-e.g. power spectral density coefficients for audio data. For tasks like object
+e.g., power spectral density coefficients for audio data. For tasks like object
or speech recognition we know that all the information required to successfully
perform the task is encoded in the data (because humans can perform these tasks
from the raw data). However, natural language processing systems traditionally
@@ -109,7 +109,7 @@ $$
where \\(\text{score}(w_t, h)\\) computes the compatibility of word \\(w_t\\)
with the context \\(h\\) (a dot product is commonly used). We train this model
by maximizing its [log-likelihood](https://en.wikipedia.org/wiki/Likelihood_function)
-on the training set, i.e. by maximizing
+on the training set, i.e., by maximizing
$$
\begin{align}
@@ -176,7 +176,7 @@ As an example, let's consider the dataset
We first form a dataset of words and the contexts in which they appear. We
could define 'context' in any way that makes sense, and in fact people have
looked at syntactic contexts (i.e. the syntactic dependents of the current
-target word, see e.g.
+target word, see e.g.,
[Levy et al.](https://levyomer.files.wordpress.com/2014/04/dependency-based-word-embeddings-acl-2014.pdf)),
words-to-the-left of the target, words-to-the-right of the target, etc. For
now, let's stick to the vanilla definition and define 'context' as the window
@@ -204,7 +204,7 @@ where the goal is to predict `the` from `quick`. We select `num_noise` number
of noisy (contrastive) examples by drawing from some noise distribution,
typically the unigram distribution, \\(P(w)\\). For simplicity let's say
`num_noise=1` and we select `sheep` as a noisy example. Next we compute the
-loss for this pair of observed and noisy examples, i.e. the objective at time
+loss for this pair of observed and noisy examples, i.e., the objective at time
step \\(t\\) becomes
$$J^{(t)}_\text{NEG} = \log Q_\theta(D=1 | \text{the, quick}) +
@@ -212,7 +212,7 @@ $$J^{(t)}_\text{NEG} = \log Q_\theta(D=1 | \text{the, quick}) +
The goal is to make an update to the embedding parameters \\(\theta\\) to improve
(in this case, maximize) this objective function. We do this by deriving the
-gradient of the loss with respect to the embedding parameters \\(\theta\\), i.e.
+gradient of the loss with respect to the embedding parameters \\(\theta\\), i.e.,
\\(\frac{\partial}{\partial \theta} J_\text{NEG}\\) (luckily TensorFlow provides
easy helper functions for doing this!). We then perform an update to the
embeddings by taking a small step in the direction of the gradient. When this
@@ -227,7 +227,7 @@ When we inspect these visualizations it becomes apparent that the vectors
capture some general, and in fact quite useful, semantic information about
words and their relationships to one another. It was very interesting when we
first discovered that certain directions in the induced vector space specialize
-towards certain semantic relationships, e.g. *male-female*, *verb tense* and
+towards certain semantic relationships, e.g., *male-female*, *verb tense* and
even *country-capital* relationships between words, as illustrated in the figure
below (see also for example [Mikolov et al., 2013](https://www.aclweb.org/anthology/N13-1090)).
diff --git a/site/en/r1/tutorials/sequences/audio_recognition.md b/site/en/r1/tutorials/sequences/audio_recognition.md
index 8ad71b88a3c..0388514ec92 100644
--- a/site/en/r1/tutorials/sequences/audio_recognition.md
+++ b/site/en/r1/tutorials/sequences/audio_recognition.md
@@ -159,9 +159,9 @@ accuracy. If the training accuracy increases but the validation doesn't, that's
a sign that overfitting is occurring, and your model is only learning things
about the training clips, not broader patterns that generalize.
-## Tensorboard
+## TensorBoard
-A good way to visualize how the training is progressing is using Tensorboard. By
+A good way to visualize how the training is progressing is using TensorBoard. By
default, the script saves out events to /tmp/retrain_logs, and you can load
these by running:
diff --git a/site/en/tutorials/distribute/multi_worker_with_estimator.ipynb b/site/en/tutorials/distribute/multi_worker_with_estimator.ipynb
index 2abf05aa9f8..fcee0618854 100644
--- a/site/en/tutorials/distribute/multi_worker_with_estimator.ipynb
+++ b/site/en/tutorials/distribute/multi_worker_with_estimator.ipynb
@@ -186,7 +186,7 @@
"\n",
"There are two components of `TF_CONFIG`: `cluster` and `task`. `cluster` provides information about the entire cluster, namely the workers and parameter servers in the cluster. `task` provides information about the current task. The first component `cluster` is the same for all workers and parameter servers in the cluster, and the second component `task` is different on each worker and parameter server and specifies its own `type` and `index`. In this example, the task `type` is `worker` and the task `index` is `0`.\n",
"\n",
- "For illustration purposes, this tutorial shows how to set a `TF_CONFIG` with 2 workers on `localhost`. In practice, you would create multiple workers on an external IP address and port, and set `TF_CONFIG` on each worker appropriately, i.e. modify the task `index`.\n",
+ "For illustration purposes, this tutorial shows how to set a `TF_CONFIG` with 2 workers on `localhost`. In practice, you would create multiple workers on an external IP address and port, and set `TF_CONFIG` on each worker appropriately, i.e., modify the task `index`.\n",
"\n",
"Warning: *Do not execute the following code in Colab.* TensorFlow's runtime will attempt to create a gRPC server at the specified IP address and port, which will likely fail. See the [keras version](multi_worker_with_keras.ipynb) of this tutorial for an example of how you can test run multiple workers on a single machine.\n",
"\n",
diff --git a/site/en/tutorials/generative/cyclegan.ipynb b/site/en/tutorials/generative/cyclegan.ipynb
index 4c2b3ba8777..313be519591 100644
--- a/site/en/tutorials/generative/cyclegan.ipynb
+++ b/site/en/tutorials/generative/cyclegan.ipynb
@@ -154,7 +154,7 @@
"This is similar to what was done in [pix2pix](https://www.tensorflow.org/tutorials/generative/pix2pix#load_the_dataset)\n",
"\n",
"* In random jittering, the image is resized to `286 x 286` and then randomly cropped to `256 x 256`.\n",
- "* In random mirroring, the image is randomly flipped horizontally i.e. left to right."
+ "* In random mirroring, the image is randomly flipped horizontally i.e., left to right."
]
},
{
diff --git a/site/en/tutorials/generative/data_compression.ipynb b/site/en/tutorials/generative/data_compression.ipynb
index b6c043c0598..f756f088acd 100644
--- a/site/en/tutorials/generative/data_compression.ipynb
+++ b/site/en/tutorials/generative/data_compression.ipynb
@@ -821,7 +821,7 @@
{
"cell_type": "markdown",
"metadata": {
- "id": "3ELLMAN1OwMQ"
+ "id": "3ELLMAN1OwMQ"
},
"source": [
"The strings begin to get much shorter now, on the order of one byte per digit. However, this comes at a cost. More digits are becoming unrecognizable.\n",
diff --git a/site/en/tutorials/generative/pix2pix.ipynb b/site/en/tutorials/generative/pix2pix.ipynb
index 5912fab9be3..e45950dd923 100644
--- a/site/en/tutorials/generative/pix2pix.ipynb
+++ b/site/en/tutorials/generative/pix2pix.ipynb
@@ -280,7 +280,7 @@
"\n",
"1. Resize each `256 x 256` image to a larger height and width—`286 x 286`.\n",
"2. Randomly crop it back to `256 x 256`.\n",
- "3. Randomly flip the image horizontally i.e. left to right (random mirroring).\n",
+ "3. Randomly flip the image horizontally i.e., left to right (random mirroring).\n",
"4. Normalize the images to the `[-1, 1]` range."
]
},
diff --git a/site/en/tutorials/interpretability/integrated_gradients.ipynb b/site/en/tutorials/interpretability/integrated_gradients.ipynb
index 2ee792aa4e2..e63c8cdb7a2 100644
--- a/site/en/tutorials/interpretability/integrated_gradients.ipynb
+++ b/site/en/tutorials/interpretability/integrated_gradients.ipynb
@@ -724,7 +724,7 @@
"ax2 = plt.subplot(1, 2, 2)\n",
"# Average across interpolation steps\n",
"average_grads = tf.reduce_mean(path_gradients, axis=[1, 2, 3])\n",
- "# Normalize gradients to 0 to 1 scale. E.g. (x - min(x))/(max(x)-min(x))\n",
+ "# Normalize gradients to 0 to 1 scale. E.g., (x - min(x))/(max(x)-min(x))\n",
"average_grads_norm = (average_grads-tf.math.reduce_min(average_grads))/(tf.math.reduce_max(average_grads)-tf.reduce_min(average_grads))\n",
"ax2.plot(alphas, average_grads_norm)\n",
"ax2.set_title('Average pixel gradients (normalized) over alpha')\n",
diff --git a/site/en/tutorials/keras/save_and_load.ipynb b/site/en/tutorials/keras/save_and_load.ipynb
index 02c8af3a71d..404fa1ee8be 100644
--- a/site/en/tutorials/keras/save_and_load.ipynb
+++ b/site/en/tutorials/keras/save_and_load.ipynb
@@ -854,7 +854,7 @@
"  * `from_config(cls, config)` uses the returned config from `get_config` to create a new object. By default, this function will use the config as initialization kwargs (`return cls(**config)`).\n",
"2. Pass the custom objects to the model in one of three ways:\n",
"  - Register the custom object with the `@tf.keras.utils.register_keras_serializable` decorator. **(recommended)**\n",
- "  - Directly pass the object to the `custom_objects` argument when loading the model. The argument must be a dictionary mapping the string class name to the Python class. E.g. `tf.keras.models.load_model(path, custom_objects={'CustomLayer': CustomLayer})`\n",
+ "  - Directly pass the object to the `custom_objects` argument when loading the model. The argument must be a dictionary mapping the string class name to the Python class. E.g., `tf.keras.models.load_model(path, custom_objects={'CustomLayer': CustomLayer})`\n",
"  - Use a `tf.keras.utils.custom_object_scope` with the object included in the `custom_objects` dictionary argument, and place a `tf.keras.models.load_model(path)` call within the scope.\n",
"\n",
"Refer to the [Writing layers and models from scratch](https://www.tensorflow.org/guide/keras/custom_layers_and_models) tutorial for examples of custom objects and `get_config`.\n"
diff --git a/site/en/tutorials/load_data/pandas_dataframe.ipynb b/site/en/tutorials/load_data/pandas_dataframe.ipynb
index cee2483a350..66bace1ff87 100644
--- a/site/en/tutorials/load_data/pandas_dataframe.ipynb
+++ b/site/en/tutorials/load_data/pandas_dataframe.ipynb
@@ -1036,8 +1036,8 @@
},
"outputs": [],
"source": [
- "preprocesssed_result = tf.concat(preprocessed, axis=-1)\n",
- "preprocesssed_result"
+ "preprocessed_result = tf.concat(preprocessed, axis=-1)\n",
+ "preprocessed_result"
]
},
{
@@ -1057,7 +1057,7 @@
},
"outputs": [],
"source": [
- "preprocessor = tf.keras.Model(inputs, preprocesssed_result)"
+ "preprocessor = tf.keras.Model(inputs, preprocessed_result)"
]
},
{
diff --git a/site/en/tutorials/structured_data/imbalanced_data.ipynb b/site/en/tutorials/structured_data/imbalanced_data.ipynb
index 0d9578b30dc..16d08e53385 100644
--- a/site/en/tutorials/structured_data/imbalanced_data.ipynb
+++ b/site/en/tutorials/structured_data/imbalanced_data.ipynb
@@ -445,7 +445,7 @@
"\n",
"#### Metrics for probability predictions\n",
"\n",
- "As we train our network with the cross entropy as a loss function, it is fully capable of predicting class probabilities, i.e. it is a probabilistic classifier.\n",
+ "As we train our network with the cross entropy as a loss function, it is fully capable of predicting class probabilities, i.e., it is a probabilistic classifier.\n",
"Good metrics to assess probabilistic predictions are, in fact, **proper scoring rules**. Their key property is that predicting the true probability is optimal. We give two well-known examples:\n",
"\n",
"* **cross entropy** also known as log loss\n",
diff --git a/tools/tensorflow_docs/api_generator/doc_generator_visitor.py b/tools/tensorflow_docs/api_generator/doc_generator_visitor.py
index ce6fe68105f..0467f74b153 100644
--- a/tools/tensorflow_docs/api_generator/doc_generator_visitor.py
+++ b/tools/tensorflow_docs/api_generator/doc_generator_visitor.py
@@ -596,7 +596,7 @@ def _get_physical_path(self, py_object):
  @classmethod
  def from_path_tree(cls, path_tree: PathTree, score_name_fn) -> ApiTree:
-    """Create an ApiTree from an PathTree.
+    """Create an ApiTree from a PathTree.
    Args:
      path_tree: The `PathTree` to convert.
diff --git a/tools/tensorflow_docs/api_generator/parser_test.py b/tools/tensorflow_docs/api_generator/parser_test.py
index ee8a55f707f..0bfffeded92 100644
--- a/tools/tensorflow_docs/api_generator/parser_test.py
+++ b/tools/tensorflow_docs/api_generator/parser_test.py
@@ -799,7 +799,7 @@ class A():
    self.assertEqual('Instance of `m.A`', result)
-  def testIsClasssAttr(self):
+  def testIsClassAttr(self):
    result = parser.is_class_attr('test_module.test_function',
                                  {'test_module': test_module})
    self.assertFalse(result)
@@ -808,6 +808,7 @@ def testIsClasssAttr(self):
                                  {'TestClass': TestClass})
    self.assertTrue(result)
+
RELU_DOC = """Computes rectified linear: `max(features, 0)`
RELU is an activation
diff --git a/tools/tensorflow_docs/api_generator/toc.py b/tools/tensorflow_docs/api_generator/toc.py
index 1e72bcda75c..feaa15b8bda 100644
--- a/tools/tensorflow_docs/api_generator/toc.py
+++ b/tools/tensorflow_docs/api_generator/toc.py
@@ -273,7 +273,7 @@ def _is_deprecated(self, api_node: doc_generator_visitor.ApiTreeNode):
      api_node: The node to evaluate.
    Returns:
-      True if depreacted else False.
+      True if deprecated else False.
    """
    if doc_controls.is_deprecated(api_node.py_object):
      return True
diff --git a/tools/tensorflow_docs/tools/nblint/decorator.py b/tools/tensorflow_docs/tools/nblint/decorator.py
index 408fef3d969..d74045c7ca7 100644
--- a/tools/tensorflow_docs/tools/nblint/decorator.py
+++ b/tools/tensorflow_docs/tools/nblint/decorator.py
@@ -161,7 +161,7 @@ def fail(message: Optional[str] = None,
  Failure messages come in two flavors:
  - conditional: (Default) While this test may fail here, it may succeed
-    elsewhere, and thus, the larger condition passes and do not dislay this
+    elsewhere, and thus, the larger condition passes and do not display this
    message.
  - non-conditional (always show): Regardless if the larger condition is met,
    display this error message in the status report. For example, a