tf.constant([1, 2, 3]) | Create constant tensor |
tf.Variable([1, 2, 3]) | Create variable tensor |
tf.zeros((3, 4)) | Tensor of zeros |
tf.ones((2, 3)) | Tensor of ones |
tf.fill((2, 2), 7) | Fill with value |
tf.eye(4) | Identity matrix |
tf.range(0, 10, 2) | Range tensor |
tf.linspace(0.0, 1.0, 5) | Evenly spaced |
tf.random.uniform((3, 4)) | Uniform random |
tf.random.normal((3, 4)) | Normal random |
tf.random.truncated_normal((3, 4)) | Truncated normal |
tf.random.set_seed(42) | Set random seed |
tf.random.shuffle(tensor) | Shuffle tensor |
t.shape | Tensor shape |
t.dtype | Data type |
t.device | Device |
tf.size(t) | Number of elements |
tf.rank(t) | Number of dimensions |
t.numpy() | Convert to NumPy |
tf.add(a, b), a + b | Addition |
tf.subtract(a, b), a - b | Subtraction |
tf.multiply(a, b), a * b | Element-wise multiply |
tf.matmul(a, b), a @ b | Matrix multiply |
tf.pow(t, 2) | Power |
tf.sqrt(t) | Square root |
tf.exp(t), tf.math.log(t) | Exp/Log |
tf.abs(t) | Absolute value |
tf.reduce_sum(t) | Sum all elements |
tf.reduce_mean(t) | Mean |
tf.reduce_max(t), tf.reduce_min(t) | Max/Min |
tf.reduce_prod(t) | Product |
tf.argmax(t), tf.argmin(t) | Index of max/min |
tf.reshape(t, (3, 4)) | Reshape tensor |
tf.squeeze(t) | Remove size-1 dims |
tf.expand_dims(t, axis=0) | Add dimension |
tf.transpose(t) | Transpose |
tf.concat([a, b], axis=0) | Concatenate |
tf.stack([a, b], axis=0) | Stack tensors |
tf.split(t, 3, axis=0) | Split tensor |
model = tf.keras.Sequential([...]) | Create sequential model |
model.add(tf.keras.layers.Dense(64)) | Add layer |
model.summary() | Print model summary |
model.compile(optimizer, loss, metrics) | Compile model |
inputs = tf.keras.Input(shape=(784,)) | Define input |
x = tf.keras.layers.Dense(64)(inputs) | Apply layer |
model = tf.keras.Model(inputs, outputs) | Create model |
layers.Dense(units, activation="relu") | Dense/Fully connected |
layers.Conv2D(filters, kernel_size) | 2D Convolution |
layers.MaxPooling2D(pool_size) | Max pooling |
layers.Flatten() | Flatten layer |
layers.Dropout(rate) | Dropout |
layers.BatchNormalization() | Batch normalization |
layers.LSTM(units) | LSTM layer |
layers.Embedding(vocab_size, embed_dim) | Embedding |
activation="relu" | ReLU |
activation="sigmoid" | Sigmoid |
activation="tanh" | Tanh |
activation="softmax" | Softmax |
tf.keras.activations.gelu | GELU |
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]) | Compile model |
model.fit(x_train, y_train, epochs=10, batch_size=32) | Train model |
model.fit(..., validation_data=(x_val, y_val)) | With validation |
model.fit(..., validation_split=0.2) | Auto validation split |
tf.keras.optimizers.SGD(learning_rate=0.01) | SGD |
tf.keras.optimizers.Adam(learning_rate=0.001) | Adam |
tf.keras.optimizers.RMSprop(learning_rate=0.001) | RMSprop |
tf.keras.optimizers.AdamW(learning_rate=0.001) | AdamW |
"sparse_categorical_crossentropy" | For integer labels |
"categorical_crossentropy" | For one-hot labels |
"binary_crossentropy" | Binary classification |
"mse" or "mean_squared_error" | Mean squared error |
"mae" or "mean_absolute_error" | Mean absolute error |
tf.keras.callbacks.ModelCheckpoint(path) | Save best model |
tf.keras.callbacks.EarlyStopping(patience=5) | Early stopping |
tf.keras.callbacks.TensorBoard(log_dir) | TensorBoard logging |
tf.keras.callbacks.LearningRateScheduler(fn) | LR scheduler |
tf.keras.callbacks.ReduceLROnPlateau() | Reduce LR on plateau |
model.evaluate(x_test, y_test) | Evaluate on test data |
model.predict(x_new) | Make predictions |
model.predict_classes(x_new) | Predict classes (removed in TF 2.6+; use the argmax pattern below) |
tf.argmax(model.predict(x), axis=1) | Get class predictions |
tf.data.Dataset.from_tensor_slices((x, y)) | Create from tensors |
dataset.batch(32) | Batch data |
dataset.shuffle(buffer_size) | Shuffle data |
dataset.prefetch(tf.data.AUTOTUNE) | Prefetch for performance |
dataset.map(transform_fn) | Apply transformation |
dataset.cache() | Cache dataset |
dataset.repeat() | Repeat dataset |
tf.keras.datasets.mnist.load_data() | MNIST dataset |
tf.keras.datasets.cifar10.load_data() | CIFAR-10 dataset |
tf.keras.datasets.imdb.load_data() | IMDB reviews |
model.save("model.keras") | Save entire model |
tf.keras.models.load_model("model.keras") | Load model |
model.save_weights("model.weights.h5") | Save weights only (Keras 3 requires the .weights.h5 suffix) |
model.load_weights("model.weights.h5") | Load weights |
tf.saved_model.save(model, "saved_model/") | SavedModel format |