# synchronous
import tensorflow as tf

mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
    # model parameters are created as mirrored variables instead of regular variables
    # variables are mirrored across all replicas (one copy per device)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, input_shape=(5,)),
        tf.keras.layers.Dense(16, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    model.compile(loss='mse', optimizer='sgd')

# train_dataset is assumed to be a batched tf.data.Dataset of (features, labels)
model.fit(train_dataset, epochs=2)
model.evaluate(train_dataset)
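The snippets in this post reference a train_dataset that is never defined. A minimal sketch of one possible input pipeline, using synthetic data and a hypothetical batch size of 32:

# hypothetical input pipeline: random features and labels that match the Dense layers above
import numpy as np

features = np.random.random((1000, 5)).astype('float32')
labels = np.random.random((1000, 1)).astype('float32')
train_dataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(32)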
# asynchronous
async_strategy = tf.distribute.experimental.ParameterServerStrategy()
with async_strategy.scope():
    # variables live on parameter servers; workers compute gradients and push updates asynchronously
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, input_shape=(5,)),
        tf.keras.layers.Dense(16, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    model.compile(loss='mse', optimizer='sgd')

model.fit(train_dataset, epochs=2)
model.evaluate(train_dataset)
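Note that the constructor arguments vary by TensorFlow version: newer 2.x releases expect a cluster resolver that describes the workers and parameter servers. A minimal sketch, assuming the cluster is declared through the TF_CONFIG environment variable:

# sketch only: assumes TF_CONFIG describes the chief, worker, and ps tasks of the cluster
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
async_strategy = tf.distribute.experimental.ParameterServerStrategy(cluster_resolver)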
TPUClusterResolver - points to the location of the TPUs.
TPUStrategy - runs the distributed training on those TPUs.
We don't need to specify any value for tpu_address when working with Google Colab.
# locate the TPUs and connect the runtime to them
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu_address)
tf.config.experimental_connect_to_cluster(cluster_resolver)
# the TPU system must be initialized before the strategy is created
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
tpu_strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
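Once created, tpu_strategy is used exactly like the strategies above; a minimal sketch reusing the same Keras model and the (assumed) train_dataset:

with tpu_strategy.scope():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, input_shape=(5,)),
        tf.keras.layers.Dense(16, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    model.compile(loss='mse', optimizer='sgd')

# TPUs prefer fixed batch shapes, so batching with drop_remainder=True is a safe choice
model.fit(train_dataset, epochs=2)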
tf.data.Dataset.prefetch(buffer_size) overlaps the preprocessing and model execution of a training step: while the model is executing training step N, the input pipeline is preparing the data for training step N + 1.
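A minimal sketch of applying it to the (assumed) train_dataset, letting tf.data choose the buffer size:

# AUTOTUNE lets the tf.data runtime tune how many batches to prefetch
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)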