├── README.md
├── tf_6.py
├── tf_keras_13.py
├── tf_keras_11.py
├── tf_keras_9.py
├── tf_7_2.py
└── tf_8.py

/README.md:
--------------------------------------------------------------------------------
# tensorflow
--------------------------------------------------------------------------------
/tf_6.py:
--------------------------------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf


class DenseNN(tf.Module):
    def __init__(self, outputs):
        super().__init__()
        self.outputs = outputs
        self.fl_init = False

    def __call__(self, x):
        # Lazily create the weights on the first call, once the input size is known.
        if not self.fl_init:
            self.w = tf.random.truncated_normal((x.shape[-1], self.outputs), stddev=0.1, name="w")
            self.b = tf.zeros([self.outputs], dtype=tf.float32, name="b")

            self.w = tf.Variable(self.w)
            self.b = tf.Variable(self.b)

            self.fl_init = True

        y = x @ self.w + self.b
        return y


model = DenseNN(1)
# print(model(tf.constant([[1.0, 2.0]])))

# Synthetic training data: the target is the sum of the two input features.
x_train = tf.random.uniform(minval=0, maxval=10, shape=(100, 2))
y_train = [a + b for a, b in x_train]

loss = lambda x, y: tf.reduce_mean(tf.square(x - y))
opt = tf.optimizers.Adam(learning_rate=0.01)

EPOCHS = 50
for n in range(EPOCHS):
    for x, y in zip(x_train, y_train):
        x = tf.expand_dims(x, axis=0)
        y = tf.constant(y, shape=(1, 1))

        with tf.GradientTape() as tape:
            f_loss = loss(y, model(x))

        grads = tape.gradient(f_loss, model.trainable_variables)
        opt.apply_gradients(zip(grads, model.trainable_variables))

    print(f_loss.numpy())

print(model.trainable_variables)
--------------------------------------------------------------------------------
/tf_keras_13.py:
--------------------------------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import cifar10, mnist

tf.random.set_seed(1)


(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_train = x_train / 255
x_test = x_test / 255

y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# A toy ResNet built with the functional API: two residual blocks whose outputs
# are added element-wise to their inputs via layers.add().
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)

x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])

x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])

x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10, activation='softmax')(x)

model = keras.Model(inputs, outputs, name="toy_resnet")
# model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=64, epochs=15, validation_split=0.2)

print( model.evaluate(x_test, y_test) )
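
# Optional follow-up, not part of the original script: a minimal sketch of using the
# trained network on one test image. "np" is an extra import introduced only here;
# x_test / y_test are the preprocessed CIFAR-10 arrays defined above.
import numpy as np

pred = model.predict(x_test[:1])                      # softmax scores, shape (1, 10)
print("predicted class:", np.argmax(pred, axis=1)[0])
print("true class:", np.argmax(y_test[:1], axis=1)[0])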
--------------------------------------------------------------------------------
/tf_keras_11.py:
--------------------------------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPooling2D, Flatten, Conv2DTranspose
from tensorflow import keras
from tensorflow.keras.datasets import cifar10, mnist

import matplotlib.pyplot as plt

# Encoder: 28x28x1 image -> 8-dimensional latent vector.
enc_input = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu')(enc_input)
x = MaxPooling2D(2, padding='same')(x)
x = Conv2D(64, 3, activation='relu')(x)
x = MaxPooling2D(2, padding='same')(x)
x = Flatten()(x)
enc_output = Dense(8, activation='linear')(x)

encoder = keras.Model(enc_input, enc_output, name="encoder")

# Decoder: 8-dimensional latent vector -> reconstructed 28x28x1 image.
dec_input = keras.Input(shape=(8,), name="encoded_img")
x = Dense(7 * 7 * 8, activation='relu')(dec_input)
x = keras.layers.Reshape((7, 7, 8))(x)
x = Conv2DTranspose(64, 5, strides=(2, 2), activation="relu", padding='same')(x)
x = keras.layers.BatchNormalization()(x)
x = Conv2DTranspose(32, 5, strides=(2, 2), activation="linear", padding='same')(x)
x = keras.layers.BatchNormalization()(x)
dec_output = Conv2DTranspose(1, 3, activation="sigmoid", padding='same')(x)

decoder = keras.Model(dec_input, dec_output, name="decoder")

autoencoder_input = Input(shape=(28, 28, 1), name="img")
x = encoder(autoencoder_input)
autoencoder_output = decoder(x)

autoencoder = keras.Model(autoencoder_input, autoencoder_output, name="autoencoder")
# autoencoder.summary()


(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
# The class labels are not needed by the autoencoder; it is trained image -> image.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

autoencoder.compile(optimizer='adam', loss='mean_squared_error')
autoencoder.fit(x_train, x_train, batch_size=32, epochs=1)

# Encode one test digit, decode it back, and show the original next to the reconstruction.
h = encoder.predict(tf.expand_dims(x_test[0], axis=0))
img = decoder.predict(h)

plt.subplot(121)
plt.imshow(x_test[0], cmap='gray')
plt.subplot(122)
plt.imshow(img.squeeze(), cmap='gray')
plt.show()
--------------------------------------------------------------------------------
/tf_keras_9.py:
--------------------------------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical


class DenseLayer(tf.keras.layers.Layer):
    def __init__(self, units=1):
        super().__init__()
        self.units = units
        self.rate = 0.01

    def build(self, input_shape):
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer="random_normal",
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,), initializer="zeros", trainable=True)

    def call(self, inputs):
        # L2-style penalty on the weights, added to the model loss and tracked as a metric.
        regular = 100.0 * tf.reduce_mean(tf.square(self.w))
        self.add_loss(regular)
        self.add_metric(regular, name="mean square weights")

        return tf.matmul(inputs, self.w) + self.b


class NeuralNetwork(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.layer_1 = DenseLayer(128)
        self.layer_2 = DenseLayer(10)

    def call(self, inputs):
        x = self.layer_1(inputs)
        x = tf.nn.relu(x)
        x = self.layer_2(x)
        x = tf.nn.softmax(x)
        return x


model = NeuralNetwork()

# model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),
#               loss=tf.losses.categorical_crossentropy,
#               metrics=['accuracy'])

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train / 255
x_test = x_test / 255

x_train = tf.reshape(tf.cast(x_train, tf.float32), [-1, 28*28])
x_test = tf.reshape(tf.cast(x_test, tf.float32), [-1, 28*28])

y_train = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)

model.fit(x_train, y_train, batch_size=32, epochs=5)

print( model.evaluate(x_test, y_test_cat) )
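
# Optional check, not part of the original script: the regularization terms added in
# DenseLayer.call() via add_loss() are collected by Keras and exposed as model.losses
# after a forward pass (one entry per DenseLayer instance).
_ = model(x_test[:1])
print("per-layer regularization losses:", [float(l) for l in model.losses])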
--------------------------------------------------------------------------------
/tf_7_2.py:
--------------------------------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import numpy as np

class DenseNN(tf.Module):
    def __init__(self, outputs, activate="relu"):
        super().__init__()
        self.outputs = outputs
        self.activate = activate
        self.fl_init = False

    def __call__(self, x):
        if not self.fl_init:
            self.w = tf.random.truncated_normal((x.shape[-1], self.outputs), stddev=0.1, name="w")
            self.b = tf.zeros([self.outputs], dtype=tf.float32, name="b")

            self.w = tf.Variable(self.w)
            self.b = tf.Variable(self.b)

            self.fl_init = True

        y = x @ self.w + self.b

        if self.activate == "relu":
            return tf.nn.relu(y)
        elif self.activate == "softmax":
            return tf.nn.softmax(y)

        return y


(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train / 255
x_test = x_test / 255

x_train = tf.reshape(tf.cast(x_train, tf.float32), [-1, 28*28])
x_test = tf.reshape(tf.cast(x_test, tf.float32), [-1, 28*28])

y_train = to_categorical(y_train, 10)


layer_1 = DenseNN(128)
layer_2 = DenseNN(10, activate="softmax")


def model_predict(x):
    y = layer_1(x)
    y = layer_2(y)
    return y  # layer_2(layer_1(x))


cross_entropy = lambda y_true, y_pred: tf.reduce_mean(tf.losses.categorical_crossentropy(y_true, y_pred))
opt = tf.optimizers.Adam(learning_rate=0.001)

BATCH_SIZE = 32
EPOCHS = 10
TOTAL = x_train.shape[0]

train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(BATCH_SIZE)

for n in range(EPOCHS):
    loss = 0

    for x_batch, y_batch in train_dataset:
        with tf.GradientTape() as tape:
            f_loss = cross_entropy(y_batch, model_predict(x_batch))

        loss += f_loss
        grads = tape.gradient(f_loss, [layer_1.trainable_variables, layer_2.trainable_variables])
        opt.apply_gradients(zip(grads[0], layer_1.trainable_variables))
        opt.apply_gradients(zip(grads[1], layer_2.trainable_variables))

    print(loss.numpy())

y = model_predict(x_test)
y2 = tf.argmax(y, axis=1).numpy()
acc = len(y_test[y_test == y2])/y_test.shape[0] * 100
print(acc)


# acc = tf.metrics.Accuracy()
# acc.update_state(y_test, y2)
# print( acc.result().numpy() * 100 )
--------------------------------------------------------------------------------
/tf_8.py:
--------------------------------------------------------------------------------
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import numpy as np

class DenseNN(tf.Module):
    def __init__(self, outputs, activate="relu"):
        super().__init__()
        self.outputs = outputs
        self.activate = activate
        self.fl_init = False

    def __call__(self, x):
        if not self.fl_init:
            self.w = tf.random.truncated_normal((x.shape[-1], self.outputs), stddev=0.1, name="w")
            self.b = tf.zeros([self.outputs], dtype=tf.float32, name="b")

            self.w = tf.Variable(self.w)
            # The bias is created as non-trainable, so it is excluded from trainable_variables.
            self.b = tf.Variable(self.b, trainable=False)

            self.fl_init = True

        y = x @ self.w + self.b

        if self.activate == "relu":
            return tf.nn.relu(y)
        elif self.activate == "softmax":
            return tf.nn.softmax(y)

        return y


class SequentialModule(tf.Module):
    def __init__(self):
        super().__init__()
        self.layer_1 = DenseNN(128)
        self.layer_2 = DenseNN(10, activate="softmax")

    def __call__(self, x):
        return self.layer_2(self.layer_1(x))


(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train / 255
x_test = x_test / 255

x_train = tf.reshape(tf.cast(x_train, tf.float32), [-1, 28*28])
x_test = tf.reshape(tf.cast(x_test, tf.float32), [-1, 28*28])

y_train = to_categorical(y_train, 10)


model = SequentialModule()
# layer_1 = DenseNN(128)
# layer_2 = DenseNN(10, activate="softmax")
# print(model.submodules)

cross_entropy = lambda y_true, y_pred: tf.reduce_mean(tf.losses.categorical_crossentropy(y_true, y_pred))
opt = tf.optimizers.Adam(learning_rate=0.001)

BATCH_SIZE = 32
EPOCHS = 10
TOTAL = x_train.shape[0]

train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(BATCH_SIZE)


# tf.function compiles the training step into a graph, which speeds up the loop.
@tf.function
def train_batch(x_batch, y_batch):
    with tf.GradientTape() as tape:
        f_loss = cross_entropy(y_batch, model(x_batch))

    grads = tape.gradient(f_loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))

    return f_loss


for n in range(EPOCHS):
    loss = 0
    for x_batch, y_batch in train_dataset:
        loss += train_batch(x_batch, y_batch)

    print(loss.numpy())


y = model(x_test)
y2 = tf.argmax(y, axis=1).numpy()
acc = len(y_test[y_test == y2])/y_test.shape[0] * 100
print(acc)

acc = tf.metrics.Accuracy()
acc.update_state(y_test, y2)
print( acc.result().numpy() * 100 )
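
# Optional follow-up, not part of the original script: a minimal sketch of saving the
# trained module's variables with a checkpoint; the "./ckpt/tf_8" prefix is illustrative.
ckpt = tf.train.Checkpoint(model=model)
print("checkpoint written to:", ckpt.save("./ckpt/tf_8"))
--------------------------------------------------------------------------------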