| Name | snudlhelper |
| Version | 0.0.1 |
| Summary | A small example package |
| upload_time | 2023-11-06 21:50:24 |
| home_page | |
| download | |
| maintainer | |
| docs_url | None |
| author | |
| requires_python | >=3.7 |
| license | |
| keywords | |
| VCS | |
| bugtrack_url | |
| requirements | No requirements were recorded. |
| Travis-CI | No Travis. |
| coveralls test coverage | No coveralls. |
Ex. No: 1
Implementing Logic Gates using Perceptron Algorithm
Date: 20/7/23
Aim: To write a program in Python to implement logic gates using the perceptron algorithm.
Algorithm:
1) Import necessary packages.
2) Assign different weights and bias for each logic gate.
3) Perform the Required Operations.
4) Display the Output
Program :
import numpy as np
def step(x):
    return 1 if x >= 0.5 else 0
### Not Gate
X = np.array([[0],[1]])
weights = np.array([-1])
bias = np.array([1])
out = X@weights + bias
print(out)
### Tautology
X = np.array([[0,0],[0,1],[1,0],[1,1]])
weights = np.array([[0],[0]])
bias = np.array([1])
out = X@weights + bias
print(out)
### AND Gate
X = np.array([[0,0],[0,1],[1,0],[1,1]])
weights = np.array([[1],[1]])
bias = np.array([-1])
out = X@weights + bias
y = np.array(list(map(step, out))).reshape(4,1)
print(y)
### OR Gate
X = np.array([[0,0],[0,1],[1,0],[1,1]])
weights = np.array([[1],[1]])
bias = np.array([0])
out = X@weights + bias
y = np.array(list(map(step, out))).reshape(4,1)
print(y)
### NOR Gate
X = np.array([[0,0],[0,1],[1,0],[1,1]])
weights = np.array([[-1],[-1]])
bias = np.array([1])
out = X@weights + bias
y = np.array(list(map(step, out))).reshape(4,1)
print(y)
### NAND Gate
X = np.array([[0,0],[0,1],[1,0],[1,1]])
weights = np.array([[-1],[-1]])
bias = np.array([2])
out = X@weights + bias
y = np.array(list(map(step, out))).reshape(4,1)
print(y)
### XOR
def step(x):
    return 1 if x > 0 else 0
step = np.vectorize(step)
x = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([[0],[1],[1],[0]])
w1 = np.array([[1,-1],[-1,1]])
w2 = np.array([[1],[1]])
y_cap = step(step(x@w1)@w2)
y_cap
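Since XOR is not linearly separable, the network above stacks two perceptron layers. A minimal check of the hidden activations, assuming the x, w1, w2 and vectorized step defined above, makes the decomposition visible:
# Hidden layer: column 0 fires for x1 AND NOT x2, column 1 for x2 AND NOT x1
hidden = step(x @ w1)
print(hidden)
# Output layer ORs the two hidden units, giving the XOR truth table
print(step(hidden @ w2))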
### XNOR
x = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([[1],[0],[0],[1]])
w1 = np.array([[1,-1],[1,-1]])
w2 = np.array([[1],[1]])
bias = np.array([[-1,1]])
y_cap = step(step(x@w1 + bias)@w2)
y_cap
Result: Thus, using Python, the logic gates have been implemented.
Ex. No: 2
MLP and Linear Regression
Date: 20/7/23
Aim: To write a program in TensorFlow for MLP and Linear Regression.
Algorithm:
1) Import the necessary packages.
2) Load the Datasets.
3) Compile the model.
4) Evaluate the model.
5) Print the metrics.
Program :
### MNIST
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
import tensorflow.keras.datasets.mnist as mnist
(x_train,y_train), (x_test,y_test) = mnist.load_data()
model = tf.keras.models.Sequential([
tf.keras.layers.Normalization(),
tf.keras.layers.Flatten(input_shape=(28,28)),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dense(10)
])
model.compile(
optimizer = tf.keras.optimizers.Adam(0.001),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
)
model.fit(x=x_train,y=y_train,batch_size=64,epochs=10)
model.evaluate(x=x_test,y=y_test)
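The final Dense(10) layer returns raw logits (the loss is built with from_logits=True), so class predictions need a softmax/argmax step; a small sketch assuming the model and x_test above:
# Convert logits to probabilities, then take the most likely digit for the first five test images
probs = tf.nn.softmax(model.predict(x_test[:5], verbose=False))
print(tf.argmax(probs, axis=1).numpy())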
### Linear Regression Using Tensorflow
import numpy as np
import matplotlib.pyplot as plt
learning_rate = 0.01
training_epochs = 1000
np.random.seed(69)
x = np.random.random_sample(500).reshape(-1,1)
y = x*2 + 0.5*np.random.random(500).reshape(-1,1)
#### Training Loop
model = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss='mean_absolute_error')
model.fit(x,y,epochs=training_epochs,batch_size=128,verbose=False)
y_pred = model.predict(x, verbose=False)
plt.scatter(x,y)
plt.plot(x, y_pred, c="red")
print("Training Cost :", model.evaluate(x,y,verbose=False))
print("Weight :", model.get_weights()[0])
print("Bias :", model.get_weights()[1])
Result: Thus, using the TensorFlow library, MLP and Linear Regression have been implemented.
Ex. No: 3
Ablation Studies
Date: 27/7/23
Aim: To Perform Ablation Studies on a Neural Network Architecture.
Algorithm:
1) Import the necessary packages.
2) Find a Vector dataset from Open Source.
3) Train the designed architecture with initial set of parameters
4) Change the training methods with various hyperparameters and record the observations.
5) Make comments about the inferences made from the table.
Program :
### Wine Dataset
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("Wine.csv")
print("Shape :", df.shape)
df.head()
df.info()
### Pre Processing
# One-Hot Quality column
df = pd.concat([df, pd.get_dummies(df["quality"], drop_first=True)], axis=1)
df = df.drop(["quality"], axis=1)
y = df["color"]
x = df.drop(["color"], axis=1)
# Label Encode color column
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
y = label.fit_transform(df["color"])
# Train-Test Split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,
random_state=69)
### Model Building
import keras
from keras.models import Sequential
from keras.layers import Normalization
from keras.layers import Dense
architecture = [
Sequential([Normalization(),
Dense(32, activation="relu"),
Dense(16, activation="relu"),
Dense(1, activation="sigmoid")]),
Sequential([Normalization(),
Dense(64, activation="relu"),
Dense(32, activation="relu"),
Dense(16, activation="relu"),
Dense(1, activation="sigmoid")]),
Sequential([Normalization(),
Dense(128, activation="relu"),
Dense(32, activation="relu"),
Dense(1, activation="sigmoid")])
]
optimizer = [keras.optimizers.Adam(learning_rate=0.01),
keras.optimizers.Adam(learning_rate=0.001),
keras.optimizers.SGD(learning_rate=0.01)]
epochs = [5,10,20]
output = pd.DataFrame(columns=["No of Layers", "No of Params", "Optimizer",
                               "Learning Rate", "Epochs", "Accuracy"])
for arch in architecture:
    for opti in optimizer:
        for epoch in epochs:
            model = arch
            model.compile(optimizer=opti, loss="BinaryCrossentropy",
                          metrics="Accuracy")
            model.fit(x_train, y_train, epochs=epoch, verbose=False)
            y_pred = model.evaluate(x_test, y_test, verbose=False)
            # collect this run's results (pd.concat; DataFrame.append is deprecated)
            output = pd.concat([output, pd.DataFrame(
                {"No of Layers": [len(model.layers)],
                 "No of Params": [model.count_params()],
                 "Optimizer": [opti.get_config()["name"]],
                 "Learning Rate": [opti.learning_rate.numpy()],
                 "Epochs": [epoch],
                 "Accuracy": [y_pred[1]]})], ignore_index=True)
Result: Thus, using the TensorFlow library, ablation studies on a neural network have been performed.
Ex. No: 4
Regularization Techniques and Linear Regression
Date: 3/8/23
Aim: To perform Regularization Techniques and implement Linear Regression using PyTorch.
Algorithm:
1) Import the necessary packages.
2) Load the Dataset.
3) Perform Pre-Processing steps on the dataset before fitting to the model.
4) Compare the Base Model with various Regularization methods and record the
observations.
5) Compare the observations
6) Plot the Inference for better Understanding
Program :
### Wine Dataset
import pandas as pd
df = pd.read_csv("Wine.csv")
print("Shape :", df.shape)
df.head()
df.info()
### Pre Processing
# One-Hot Quality column
df = pd.concat([df, pd.get_dummies(df["quality"], drop_first=True)], axis=1)
df = df.drop(["quality"], axis=1)
y = df["color"]
x = df.drop(["color"], axis=1)
# Label Encode color column
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
y = label.fit_transform(df["color"])
# Train-Test Split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,
random_state=69)
### Model Building
import keras
from keras.models import Sequential
from keras.layers import Normalization
from keras.layers import Dense, Dropout
# Plot
import matplotlib.pyplot as plt
def plot_history(hist):
    plt.plot(hist.history['loss'], label='loss')
    plt.plot(hist.history['val_loss'], label='val loss')
    plt.title("Loss vs Val_Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
# Logging
output = pd.DataFrame(columns=["Model", "Accuracy", "Loss"])
### Base Model
model = Sequential([Normalization(),
Dense(64, activation="relu"),
Dense(16, activation="relu"),
Dense(1, activation="sigmoid")])
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="BinaryCrossentropy", metrics="Accuracy")
hist = model.fit(x_train, y_train, epochs=10, verbose=False,
validation_data=(x_test,y_test))
Loss, Accuracy = model.evaluate(x_test, y_test, verbose=False)
print("Validation Result")
print("Loss :", Loss)
print("Accuracy :", Accuracy)
plot_history(hist)
output = pd.concat([output, pd.DataFrame({"Model": ["Base Model"],
                                          "Accuracy": [Accuracy],
                                          "Loss": [Loss]})], ignore_index=True)
### Base Model with L1 Regularization
model = Sequential([Normalization(),
Dense(64, activation="relu", kernel_regularizer='l1'),
Dense(16, activation="relu", kernel_regularizer='l1'),
Dense(1, activation="sigmoid")])
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="BinaryCrossentropy", metrics="Accuracy")
hist = model.fit(x_train, y_train, epochs=10, verbose=False,
validation_data=(x_test,y_test))
Loss, Accuracy = model.evaluate(x_test, y_test, verbose=False)
print("Validation Result")
print("Loss :", Loss)
print("Accuracy :", Accuracy)
plot_history(hist)
output = pd.concat([output, pd.DataFrame({"Model": ["L1"],
                                          "Accuracy": [Accuracy],
                                          "Loss": [Loss]})], ignore_index=True)
### Base Model with L2 Regularization
model = Sequential([Normalization(),
Dense(64, activation="relu", kernel_regularizer='l2'),
Dense(16, activation="relu", kernel_regularizer='l2'),
Dense(1, activation="sigmoid")])
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="BinaryCrossentropy", metrics="Accuracy")
hist = model.fit(x_train, y_train, epochs=10, verbose=False,
validation_data=(x_test,y_test))
Loss, Accuracy = model.evaluate(x_test, y_test, verbose=False)
print("Validation Result")
print("Loss :", Loss)
print("Accuracy :", Accuracy)
plot_history(hist)
output = pd.concat([output, pd.DataFrame({"Model": ["L2"],
                                          "Accuracy": [Accuracy],
                                          "Loss": [Loss]})], ignore_index=True)
### Base Model with Dropout
model = Sequential([Normalization(),
Dense(64, activation="relu"),
Dropout(0.2),
Dense(16, activation="relu"),
Dropout(0.5),
Dense(1, activation="sigmoid")])
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="BinaryCrossentropy", metrics="Accuracy")
hist = model.fit(x_train, y_train, epochs=10, verbose=False,
validation_data=(x_test,y_test))
Loss, Accuracy = model.evaluate(x_test, y_test, verbose=False)
print("Validation Result")
print("Loss :", Loss)
print("Accuracy :", Accuracy)
plot_history(hist)
output = pd.concat([output, pd.DataFrame({"Model": ["Dropout"],
                                          "Accuracy": [Accuracy],
                                          "Loss": [Loss]})], ignore_index=True)
### Base Model with Early Stopping
model = Sequential([Normalization(),
Dense(64, activation="relu"),
Dense(16, activation="relu"),
Dense(1, activation="sigmoid")])
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss="BinaryCrossentropy", metrics="Accuracy")
callback = keras.callbacks.EarlyStopping(monitor='loss', patience=5)
hist = model.fit(x_train, y_train, epochs=100, callbacks=[callback],
verbose=False, validation_data=(x_test,y_test))
Loss, Accuracy = model.evaluate(x_test, y_test, verbose=False)
print("Validation Result")
print("Loss :", Loss)
print("Accuracy :", Accuracy)
plot_history(hist)
output = pd.concat([output, pd.DataFrame({"Model": ["Early Stopping"],
                                          "Accuracy": [Accuracy],
                                          "Loss": [Loss]})], ignore_index=True)
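To compare the five runs at a glance, the logged frame can be printed and plotted; a minimal sketch assuming the output frame filled in the sections above:
# Tabulate and plot the regularization comparison
print(output.to_string(index=False))
output.plot(x="Model", y=["Accuracy", "Loss"], kind="bar", rot=30)
plt.tight_layout()
plt.show()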
### Linear Regression
import torch
import numpy as np
import matplotlib.pyplot as plt
x = torch.arange(-5,5,0.2).view(-1,1)
y = x*0.69 + torch.rand(x.size())
# Initialize Weights
w = torch.randn(1, requires_grad = True)
b = torch.randn(1, requires_grad = True)
# Training Loop
def predict():
    return x*w + b

def calc_loss(y_pred):
    # mean squared error over the dataset, divided by 2 (note the parentheses around 2*N)
    return ((y - y_pred) ** 2).sum() / (2 * x.size()[0])

def train(epoch=10000, lr=0.001):  # lr sized for the per-sample-averaged loss
    global w, b
    for _ in range(epoch):
        # Prediction
        y_pred = predict()
        # Loss
        loss = calc_loss(y_pred)
        # Calculate gradients
        loss.backward()
        with torch.no_grad():
            w -= w.grad * lr
            b -= b.grad * lr
            w.grad.zero_()
            b.grad.zero_()
train()
print("Loss :", calc_loss(predict()))
plt.scatter(x, y)
plt.plot(x.detach().numpy(), predict().detach().numpy(), c="red")
plt.legend(["Ground Truth", "Predict"][::-1])
plt.show()
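The learned parameters can be read off directly for comparison with the generating line (slope 0.69); a short sketch assuming the trained w and b above:
# Inspect the fitted parameters
print("Weight :", w.item())
print("Bias :", b.item())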
Result: Thus, Regularization techniques and Linear Regression have been implemented.
Ex. No: 5
Ablation Studies - CNN
Date: 17/8/23
Aim: To perform ablation studies on a CNN architecture.
Algorithm:
1) Import the necessary packages.
2) Load The Dataset.
3) Pre-process the datasets with the respective methods.
4) Train an ANN and find the accuracy.
5) Tweak the Architecture among various options.
6) Tabulate the observations.
7) Plot the Inference for better understanding.
Program :
import numpy as np
import pandas as pd
import random
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.python import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Flatten, Conv2D, Dropout,
                                     MaxPooling2D, BatchNormalization)
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
import seaborn as sns
from keras.utils import np_utils
import matplotlib.pyplot as plt
%matplotlib inline
## Parameters
IMG_ROWS = 28
IMG_COLS = 28
NUM_CLASSES = 10
TEST_SIZE = 0.1
RANDOM_STATE = 2018
#Model
NO_EPOCHS = 150
PATIENCE = 20
VERBOSE = 1
BATCH_SIZE = 512
PATH="/home/ai_ds-b1/Downloads/"
train_file = PATH+"train.csv"
test_file = PATH+"test.csv"
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
print("MNIST train - rows:",train_df.shape[0]," columns:", train_df.shape[1])
print("MNIST test - rows:",test_df.shape[0]," columns:", test_df.shape[1])
# data preprocessing
def data_preprocessing(raw, hasLabel=True):
    start_pixel = 0
    if(hasLabel):
        start_pixel = 1
    if(hasLabel):
        out_y = np_utils.to_categorical(raw.label, NUM_CLASSES)
    else:
        out_y = None
    num_images = raw.shape[0]
    x_as_array = raw.values[:,start_pixel:]
    x_shaped_array = x_as_array.reshape(num_images, IMG_ROWS, IMG_COLS, 1)
    out_x = x_shaped_array / 255
    return out_x, out_y
# prepare the data
X, y = data_preprocessing(train_df)
X_test, y_test = data_preprocessing(test_df,hasLabel=False)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=TEST_SIZE,
random_state=RANDOM_STATE)
print("MNIST train - rows:",X_train.shape[0]," columns:", X_train.shape[1:4])
print("MNIST valid - rows:",X_val.shape[0]," columns:", X_val.shape[1:4])
print("MNIST test - rows:",X_test.shape[0]," columns:", X_test.shape[1:4])
# Model
model = Sequential()
# Add convolution 2D
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu', padding="same",
kernel_initializer='he_normal',input_shape=(IMG_ROWS, IMG_COLS, 1)))
# model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), strides=2,padding='same',
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(NUM_CLASSES, activation='softmax'))
# Compile the model
model.compile(loss = "categorical_crossentropy", optimizer="adam",
metrics=["accuracy"])
### Inspect the model
Let's check the model we initialized.
model.summary()
NO_EPOCHS = 5
from keras.callbacks import EarlyStopping, ModelCheckpoint
earlystopper = EarlyStopping(monitor='loss', patience=PATIENCE, verbose=VERBOSE)
checkpointer = ModelCheckpoint('best_model.h5',
monitor='val_accuracy',
verbose=VERBOSE,
save_best_only=True,
save_weights_only=True)
history = model.fit(X_train, y_train,
batch_size=BATCH_SIZE,
epochs=NO_EPOCHS,
verbose=1,
validation_data=(X_val, y_val),
callbacks=[earlystopper, checkpointer])
print("run model - predict validation set")
score = model.evaluate(X_val, y_val, verbose=0)
print(f'Last validation loss: {score[0]}, accuracy: {score[1]}')
model.load_weights('best_model.h5')  # restore the best weights saved by the checkpoint callback
score = model.evaluate(X_val, y_val, verbose=0)
print(f'Best validation loss: {score[0]}, accuracy: {score[1]}')
pred_y = np.argmax(model.predict(X_val),axis=1)
y_val = np.argmax(y_val,axis=1)
from sklearn.metrics import confusion_matrix
plt.figure(figsize=(10,10))
sns.heatmap(confusion_matrix(y_val,pred_y),annot=True)
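classification_report was imported at the top of this program but never called; a short sketch using the y_val and pred_y computed above gives per-class precision, recall and F1:
# Per-class metrics for the validation predictions
print(classification_report(y_val, pred_y))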
Result: Thus, using the TensorFlow library, ablation studies on a CNN have been performed.
Ex. No: 6
CNN vs ANN - MNIST
Date: 24/8/23
Aim: To write a program in TensorFlow to compare results between ANN and CNN on MNIST.
Algorithm:
1) Import the necessary packages.
2) Load the Dataset.
3) Pre-process the datasets with the respective methods.
4) Implement ANN and CNN.
5) Compare the results.
6) Plot the Inference.
Program :
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
### ANN
data = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = data.load_data()
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
model = Sequential()
model.add(Dense(128, activation='relu', input_shape = (784,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.summary()
model_history = model.fit(x_train, y_train, epochs=10, verbose=0)
print("Accuracy : ", model.evaluate(x_test, y_test, verbose=0)[1])
### CNN
data = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = data.load_data()
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
# add the channel dimension expected by Conv2D and scale pixels to [0, 1]
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=(28, 28,
1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.summary()
model_history = model.fit(x_train, y_train, epochs=10, verbose=0, batch_size=512)
print("Accuracy : ", model.evaluate(x_test, y_test, verbose=0)[1])
Result: Thus, using the TensorFlow library, a CNN and an ANN have been implemented and compared.
Ex. No: 7
Sentiment analysis using RNN
Date: 31/8/23
Aim: To write a program in TensorFlow to perform Sentiment analysis using RNN.
Algorithm:
1) Import the necessary packages.
2) Load IMDB dataset.
3) Pre-Process the Datasets
4) Train an RNN model with the chosen set of hyper-parameters.
5) Display the accuracy.
6) Plot the accuracy metrics.
Program :
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
(train_x, train_y), (test_x, test_y) = keras.datasets.imdb.load_data(num_words=10000)
print("Shape")
print("Train X : ", train_x.shape)
print("Train Y : ", train_y.shape)
print("Test X : ", test_x.shape)
print("Test Y : ", test_y.shape)
print("Maximum Value of word Index :", max([max(i) for i in train_x]))
print("Maximum Length of word:", max([len(i) for i in train_x]))
from keras.preprocessing.sequence import pad_sequences
train_x = pad_sequences(train_x, maxlen=269)
test_x = pad_sequences(test_x, maxlen=269)
from keras.models import Sequential
rnn = Sequential()
rnn.add(keras.layers.Embedding(10000,32,input_length =269))
rnn.add(keras.layers.SimpleRNN(16, activation="relu"))
rnn.add(keras.layers.Dense(1))
rnn.add(keras.layers.Activation("sigmoid"))
print(rnn.summary())
rnn.compile(loss="binary_crossentropy", optimizer="rmsprop",
metrics=["accuracy"])
history = rnn.fit(train_x, train_y, epochs = 10, batch_size=128, verbose = 1)
score = rnn.evaluate(test_x, test_y, verbose=0)
print("Accuracy", score[1]*100)
Result: Thus, using the TensorFlow library, an RNN has been implemented for sentiment analysis.
Ex. No: 8
LSTM vs GRU
Date: 14/9/23
Aim: To load a text sequence and a vector sequence dataset of your choice and apply LSTM and GRU models to compare the accuracy and time complexity of both models.
Algorithm:
1) Import the necessary packages.
2) Load text sequence and vector sequence dataset.
3) Apply Pre-Processing techniques.
4) Train LSTM and GRU model.
5) Compare the accuracy and time complexity.
Program :
## Using Text Sequence
import pandas as pd
import tensorflow as tf
import numpy as np
Data = pd.read_csv("training.csv")
Test = pd.read_csv('test.csv')
Data.head()
Test.head()
def Pre(xyz):
    Input = [i for i in xyz]
    stop_words = ['a','an','i','is','the','am','are','has','have','you','she','it',
                  'he','him','her','had','that','there','where','when','why',
                  'while','though','this','can','go','so']
    I1 = [i1.split() for i1 in Input]
    I2 = [[i for i in j if i not in stop_words] for j in I1]
    I1 = []
    for i in I2:
        for j in i:
            if j not in I1:
                I1.append(j)
    return I1
Inpt = Data['text']
out = Data['label']
Input = [i for i in Data['text']]
from sklearn.preprocessing import OneHotEncoder
OE = OneHotEncoder()
Out = OE.fit_transform(np.array(Data['label']).reshape(-1,1)).toarray()
stop_words = ['a','an','i','is','the','am','are','has','have','you','she','it',
              'he','him','her','had','that','there','where','when','why',
              'while','though','this','can','go','so']
I1 = [i1.split() for i1 in Input]
I2 = [[i for i in j if i not in stop_words] for j in I1]
I1 = []
for i in I2:
    for j in i:
        if j not in I1:
            I1.append(j)
Size = 300
Input = pd.DataFrame(data=0, index=[i for i in range(len(I2))], columns=I1[:Size])
for i in range(len(I2)):
    for j in range(len(I2[i])):
        Input.iloc[i][I2[i][j]] = 1
import tensorflow as tf
from keras.layers import LSTM,Dense,GRU
from keras.models import Sequential
from keras.losses import CategoricalCrossentropy
model = Sequential()
model.add(LSTM(units=100, activation='relu', input_shape=(1,Size), return_sequences=True))
model.add(Dense(Out.shape[1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.summary()
model.fit(Input.values.reshape(-1,1,Size), Out.reshape(-1,1,6), epochs=300)
Input_test = Pre(Test['text'])
Input_t = pd.DataFrame(data=0, index=[i for i in range(len(I2))], columns=I1[:Size])
for i in range(len(Input_test)):
    for j in range(len(Input_test[i])):
        Input_t.iloc[i][Input_test[i][j]] = 1
Out = OE.fit_transform(np.array(Test['label']).reshape(-1,1)).toarray()
model = Sequential()
model.add(GRU(units=100, activation='relu', input_shape=(1,Size), return_sequences=True))
model.add(Dense(Out.shape[1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.summary()
model.fit(Input.values.reshape(-1,1,Size),Out.reshape(-1,1,6),epochs=300)
from sklearn.datasets import load_digits
data = load_digits()
inp = data['data']
inp.shape
out_vec = OE.fit_transform(data['target'].reshape(-1,1)).toarray()
model = Sequential()
model.add(LSTM(units=100, activation='relu', input_shape=(1, inp.shape[-1]), return_sequences=True))
model.add(Dense(out_vec.shape[-1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.fit(inp.reshape(inp.shape[0],1,inp.shape[1]),
          out_vec.reshape(out_vec.shape[0],1,out_vec.shape[1]), epochs=100)
model = Sequential()
model.add(GRU(units=100, activation='relu', input_shape=(1, inp.shape[-1]), return_sequences=True))
model.add(Dense(out_vec.shape[-1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.fit(inp.reshape(inp.shape[0],1,inp.shape[1]),
          out_vec.reshape(out_vec.shape[0],1,out_vec.shape[1]), epochs=100)
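The aim also asks for time complexity; wall-clock training time can be compared with a simple timer, a sketch assuming the digits arrays and the model defined just above:
import time
# Time one training run; repeating this for the LSTM and GRU variants gives the comparison
start = time.time()
model.fit(inp.reshape(inp.shape[0],1,inp.shape[1]),
          out_vec.reshape(out_vec.shape[0],1,out_vec.shape[1]),
          epochs=100, verbose=0)
print("Training time (s) :", round(time.time() - start, 2))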
## END
Result: Thus, using the TensorFlow library, LSTM and GRU models have been implemented and compared.
Ex. No: 9
Text Generation using Gated Recurrent Unit Networks
Date: 21/9/23
Aim: To generate text using Gated Recurrent Unit networks.
Algorithm:
Step 1: Importing the required libraries.
Step 2: Loading the data into a string.
Step 3: Creating a mapping from each unique character in the text to a unique number.
Step 4: Pre-processing the data.
Step 5: Building the GRU network.
Step 6: Defining some helper functions which will be used during the training of the network.
a) Helper function to sample the next character:
b) Helper function to generate text after each epoch
c) Helper function to save the model after each epoch in which loss decreases
d) Helper function to reduce the learning rate each time the learning plateaus
Step 7: Training the GRU model (use 15 epochs; the batch size is your own choice, e.g. 128).
Step 8: Generating new and random text.
Program:
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM, GRU
from keras.optimizers import RMSprop
from keras.callbacks import LambdaCallback
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
import random
import sys
with open('poems.txt', 'r') as file:
text = file.read()
print(text)
vocabulary = sorted(list(set(text)))
char_to_indices = dict((c, i) for i, c in enumerate(vocabulary))
indices_to_char = dict((i, c) for i, c in enumerate(vocabulary))
print(vocabulary)
max_length = 100
steps = 5
sentences = []
next_chars = []
for i in range(0, len(text) - max_length, steps):
    sentences.append(text[i: i + max_length])
    next_chars.append(text[i + max_length])
X = np.zeros((len(sentences), max_length, len(vocabulary)), dtype=bool)
y = np.zeros((len(sentences), len(vocabulary)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_to_indices[char]] = 1
    y[i, char_to_indices[next_chars[i]]] = 1
model = Sequential()
model.add(GRU(128, input_shape =(max_length, len(vocabulary))))
model.add(Dense(len(vocabulary)))
model.add(Activation('softmax'))
optimizer = RMSprop(learning_rate=0.01)
model.compile(loss ='categorical_crossentropy', optimizer = optimizer)
def sample_index(preds, temperature=1.0):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
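The temperature in sample_index controls how sharp the sampling distribution is; a tiny illustration with a hypothetical probability vector:
# Low temperature sharpens the distribution, high temperature flattens it
p = np.array([0.6, 0.3, 0.1])
for T in [0.2, 1.0, 1.2]:
    scaled = np.exp(np.log(p) / T)
    print(T, np.round(scaled / scaled.sum(), 3))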
def on_epoch_end(epoch, logs):
    print()
    print('----- Generating text after Epoch: % d' % epoch)
    start_index = random.randint(0, len(text) - max_length - 1)
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print('----- diversity:', diversity)
        generated = ''
        sentence = text[start_index: start_index + max_length]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)
        for i in range(400):
            x_pred = np.zeros((1, max_length, len(vocabulary)))
            for t, char in enumerate(sentence):
                x_pred[0, t, char_to_indices[char]] = 1.
            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample_index(preds, diversity)
            next_char = indices_to_char[next_index]
            generated += next_char
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
print_callback = LambdaCallback(on_epoch_end = on_epoch_end)
filepath = "weights.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor ='loss',
verbose = 1, save_best_only = True,
mode ='min')
reduce_alpha = ReduceLROnPlateau(monitor ='loss', factor = 0.2,
patience = 1, min_lr = 0.001)
callbacks = [print_callback, checkpoint, reduce_alpha]
model.fit(X, y, batch_size = 128, epochs = 15, callbacks = callbacks)
def generate_text(length, diversity):
    start_index = random.randint(0, len(text) - max_length - 1)
    generated = ''
    sentence = text[start_index: start_index + max_length]
    generated += sentence
    for i in range(length):
        x_pred = np.zeros((1, max_length, len(vocabulary)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_to_indices[char]] = 1.
        preds = model.predict(x_pred, verbose=0)[0]
        next_index = sample_index(preds, diversity)
        next_char = indices_to_char[next_index]
        generated += next_char
        sentence = sentence[1:] + next_char
    return generated
print(generate_text(500, 0.2))
Result: Thus, using the TensorFlow library, a GRU has been implemented to generate text.
Ex. No: 10
Object Detection
Date: 28/9/23
Aim: To use a convolutional neural network (CNN) for object detection on the CIFAR-10 dataset.
Algorithm:
1) Import the necessary packages.
2) Load CIFAR-10 dataset.
3) Pre-process the Datasets
4) Train a CNN model.
5) Display the accuracy.
Program :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train/255
x_test = x_test/255
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Model Building
model = keras.Sequential([
    keras.layers.Conv2D(filters=32, kernel_size=(5,5), padding="same",
                        activation="relu", input_shape=(32,32,3)),
    keras.layers.MaxPooling2D((2,2)),
    keras.layers.Conv2D(filters=64, kernel_size=(5,5), padding="same", activation="relu"),
    keras.layers.MaxPooling2D((2,2)),
    keras.layers.Conv2D(filters=128, kernel_size=(5,5), padding="same", activation="relu"),
    keras.layers.MaxPooling2D((2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation="relu"),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
# Compile Model
opt = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
# Train Model
model_history = model.fit(x_train,y_train,epochs=5, batch_size=4,
validation_data=(x_test,y_test))
# Accuracy
_, accuracy = model.evaluate(x_test,y_test,verbose=0)
print("Accuracy :", accuracy)
model.summary()
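Individual predictions can be mapped back to the standard CIFAR-10 class names; a short sketch assuming the trained model and the scaled x_test, one-hot y_test above:
# Label the first five test images with predicted and actual class names
class_names = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
preds = model.predict(x_test[:5], verbose=0).argmax(axis=1)
for i, p in enumerate(preds):
    print("Predicted :", class_names[p], "| Actual :", class_names[y_test[i].argmax()])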
Result: Thus, using the TensorFlow library, a CNN has been implemented for object detection.
Ex. No: 11
Fraud Detection
Date: 5/10/23
Aim: To perform credit card fraud detection on the creditcard.csv dataset using a random forest. Print the amount details for normal transactions, plot the correlation matrix, print the accuracy, precision, recall and F1 score, and print the confusion matrix.
Algorithm:
1) Import the necessary packages.
2) Load Credit Card Dataset.
3) Pre-Process the datasets,
4) Train a random forest classifier.
5) Display the accuracy, precision, recall, F1 score and confusion matrix.
Program :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("creditcard.csv")
print("Shape :", df.shape)
df.head()
# Columns
df.columns
df.describe()
### Amount details for Normal Transaction
df[df["Class"] == 0]["Amount"].describe()
### DownSampling
df["Class"].value_counts()
from sklearn.utils import resample
zero_df = resample(df[df["Class"] == 0], n_samples=492)
sample_df = pd.concat([df[df["Class"] == 1], zero_df], ignore_index=True)
### Correlation Matrix
sample_df.corr()
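The aim asks for the correlation matrix to be plotted, not just printed; a small sketch assuming sample_df from above and that seaborn is available (it is imported in a later exercise):
import seaborn as sns
# Heatmap of the feature correlations in the downsampled data
plt.figure(figsize=(12, 10))
sns.heatmap(sample_df.corr(), cmap="coolwarm", center=0)
plt.show()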
### Random Forest
from sklearn.model_selection import train_test_split
x = sample_df.iloc[:,:-1].values
y = sample_df.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=69,
test_size=0.2)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
### Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))
Result: Thus, using the sklearn library, fraud detection has been implemented.
Ex. No: 12
Autoencoder
Date: 12/10/23
Aim: To implement a vanilla autoencoder on the MNIST dataset and plot the loss vs. epoch curve for the training and validation sets.
Algorithm:
1) Import the necessary packages.
2) Load MNIST dataset.
3) Train the autoencoder model.
4) Display the accuracy.
Program :
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import (Dense, Input, Flatten, Reshape,
                                     LeakyReLU as LR, Activation, Dropout)
from tensorflow.keras.models import Model, Sequential
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train/255.0
x_test = x_test/255.0
plt.imshow(x_train[0], cmap = "gray")
plt.show()
LATENT_SIZE = 32
encoder = Sequential([
Flatten(input_shape = (28, 28)),
Dense(512),
LR(),
Dropout(0.5),
Dense(256),
LR(),
Dropout(0.5),
Dense(128),
LR(),
Dropout(0.5),
Dense(64),
LR(),
Dropout(0.5),
Dense(LATENT_SIZE),
LR()
])
decoder = Sequential([
Dense(64, input_shape = (LATENT_SIZE,)),
LR(),
Dropout(0.5),
Dense(128),
LR(),
Dropout(0.5),
Dense(256),
LR(),
Dropout(0.5),
Dense(512),
LR(),
Dropout(0.5),
Dense(784),
Activation("sigmoid"),
Reshape((28, 28))
])
img = Input(shape = (28, 28))
latent_vector = encoder(img)
output = decoder(latent_vector)
model = Model(inputs = img, outputs = output)
model.compile("nadam", loss = "binary_crossentropy")
EPOCHS = 10
for epoch in range(EPOCHS):
    fig, axs = plt.subplots(4, 4)
    rand = x_test[np.random.randint(0, 10000, 16)].reshape((4, 4, 1, 28, 28))
    display.clear_output()
    for i in range(4):
        for j in range(4):
            axs[i, j].imshow(model.predict(rand[i, j])[0], cmap="gray")
            axs[i, j].axis("off")
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.show()
    print("-----------", "EPOCH", epoch, "-----------")
    model.fit(x_train, x_train)
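The aim asks for a loss vs. epoch curve for the training and validation sets, which the loop above does not record; a minimal sketch assuming the model, x_train and x_test defined above:
# Train once more while recording history, then plot train and validation loss per epoch
hist = model.fit(x_train, x_train, epochs=EPOCHS, validation_data=(x_test, x_test), verbose=0)
plt.plot(hist.history["loss"], label="train loss")
plt.plot(hist.history["val_loss"], label="val loss")
plt.xlabel("Epoch")
plt.ylabel("Binary cross-entropy")
plt.legend()
plt.show()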
Result: Thus, using TensorFlow library, Autoencoder has been implemented.
Ex. No: 13
Anomaly Detection System
Date: 19/10/23
Aim: To develop an anomaly detection system that is trained on normal data only but is used to distinguish normal data from anomalous data during testing.
Algorithm:
1) Import the necessary packages.
2) Load MNIST as normal data and Fashion MNIST as anomaly data.
3) Pre-process the data.
4) Train a CNN Autoencoder model.
5) Now test the trained model with the anomaly data and display the outputs.
6) Display the accuracy for anomaly data.
Program :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import datasets
digit = datasets.mnist.load_data()
fashion = datasets.fashion_mnist.load_data()
# Train-test Split - DIGIT
x_train = digit[0][0]
x_test = digit[1][0]
# Fashion - Test
fashion_test = fashion[1][0]
# DIGIT
x_train = x_train / 255
x_test = x_test / 255
x_train = x_train.reshape(len(x_train), 28, 28, 1)
x_test = x_test.reshape(len(x_test), 28, 28, 1)
# FASHION
fashion_test = fashion_test / 255
fashion_test = fashion_test.reshape(len(fashion_test), 28, 28, 1)
def plot_image(array, sample_size=5):
    index = 1
    plt.figure(figsize=(20, 4))
    for i in np.random.choice(array.shape[0], size=sample_size):
        ax = plt.subplot(2, 10, index)
        index += 1
        plt.imshow(array[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
plot_image(x_train)
plot_image(fashion_test)
### Convolutional Autoencoder
import keras
input_img = keras.Input(shape = (28, 28, 1))
x = keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = keras.layers.UpSampling2D((2, 2))(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = keras.layers.UpSampling2D((2, 2))(x)
x = keras.layers.Conv2D(16, (3, 3), activation='relu')(x)
x = keras.layers.UpSampling2D((2, 2))(x)
decoded = keras.layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy',metrics =
['accuracy'])
autoencoder.summary()
autoencoder.fit(x_train, x_train, epochs=25, batch_size=64,
validation_data=(x_test, x_test))
digit_predict = autoencoder.predict(x_test)
fashion_predict = autoencoder.predict(fashion_test)
# Regenerated Image
plot_image(digit_predict)
plot_image(fashion_predict)
# Fixing Threshold
reconstruction_error_digit = []
reconstruction_error_fashion = []
for i in x_test[:100]:
    error = autoencoder.evaluate([i], [i], verbose=0)[0]
    reconstruction_error_digit.append(error)
for i in fashion_test[:100]:
    error = autoencoder.evaluate([i], [i], verbose=0)[0]
    reconstruction_error_fashion.append(error)
reconstruction_error_digit = np.array(reconstruction_error_digit)
reconstruction_error_fashion = np.array(reconstruction_error_fashion)
threshold = [reconstruction_error_digit.mean() - 2*reconstruction_error_digit.std(),
             reconstruction_error_digit.mean() + 2*reconstruction_error_digit.std()]
def detect_anomaly(image):
    error = autoencoder.evaluate([image], [image], verbose=0)[0]
    if error >= threshold[0] and error <= threshold[1]:
        print("Noice !!!")
        return 0
    else:
        print("Anomaly Detected")
        return 1
accuracy = 0
for i in fashion_test[10:20]:
    accuracy += detect_anomaly(i)
print("Accuracy : ", accuracy)
Result: Thus, using the TensorFlow library, an anomaly detection system has been implemented.
Raw data
{
"_id": null,
"home_page": "",
"name": "snudlhelper",
"maintainer": "",
"docs_url": null,
"requires_python": ">=3.7",
"maintainer_email": "",
"keywords": "",
"author": "",
"author_email": "SheriffAbdullah <abdullah21110220@snuchennai.edu.in>",
"download_url": "https://files.pythonhosted.org/packages/6d/ac/f4923fea46688f1911155d6b39291297a1bdb2061330a06be5ffa07a1c70/snudlhelper-0.0.1.tar.gz",
"platform": null,
"description": "Ex. No: 1\nImplementing Logic Gates using Perceptron Algorithm\nDate: 20/7/23\n\nAim: To write a program using python to implement logic gates using perceptron algorithm.\n\nAlgorithm:\n1) Import necessary packages.\n2) Assign different weights and bias for each logic gate.\n3) Perform the Required Operations.\n4) Display the Output\n\nProgram :\nimport numpy as np\ndef step(x):\nreturn 1 if x >= 0.5 else 0\n### Not Gate\nX = np.array([[0],[1]])\nweights = np.array([-1])\nbias = np.array([1])\nout = X@weights + bias\nprint(out)\n### Tautology\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\nweights = np.array([[0],[0]])\nbias = np.array([1])\nout = X@weights + bias\nprint(out)\n### AND Gate\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\nweights = np.array([[1],[1]])\nbias = np.array([-1])\nout = X@weights + bias\ny = np.array(list(map(step, out))).reshape(4,1)\nprint(y)\n### OR Gate\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\nweights = np.array([[1],[1]])\nbias = np.array([0])\nout = X@weights + bias\ny = np.array(list(map(step, out))).reshape(4,1)\nprint(y)\n### NOR Gate\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\nweights = np.array([[-1],[-1]])\nbias = np.array([1])\nout = X@weights + bias\ny = np.array(list(map(step, out))).reshape(4,1)\nprint(y)\n### NAND Gate\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\nweights = np.array([[-1],[-1]])\nbias = np.array([2])\nout = X@weights + bias\ny = np.array(list(map(step, out))).reshape(4,1)\nprint(y)\n### XOR\ndef step(x):\nreturn 1 if x>0 else 0\nstep = np.vectorize(step)\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([[0],[1],[1],[0]])\nw1 = np.array([[1,-1],[-1,1]])\nw2 = np.array([[1],[1]])\ny_cap = step(step(x@w1)@w2)\ny_cap\n### XNOR\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([[1],[0],[0],[1]])\nw1 = np.array([[1,-1],[1,-1]])\nw2 = np.array([[1],[1]])\nbias = np.array([[-1,1]])\ny_cap = step(step(x@w1 + bias)@w2)\ny_cap\n\nResult: Thus, using Python, the logic gates have been implemented\n\n\nEx. 
No: 2\nMLP and Linear Regression\nDate: 20/7/23\n\nAim: To write a program in TensorFlow for MLP and Linear Regression.\n\nAlgorithm:\n1) Import the necessary packages.\n2) Load the Datasets.\n3) Compile the model.\n4) Evaluate the model.\n5) Print the metrices.\n\nProgram :\n### MNIST\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport tensorflow as tf\nimport tensorflow.keras.datasets.mnist as mnist\n(x_train,y_train), (x_test,y_test) = mnist.load_data()\nmodel = tf.keras.models.Sequential([\ntf.keras.layers.Normalization(),\ntf.keras.layers.Flatten(input_shape=(28,28)),\ntf.keras.layers.Dense(128, activation=\"relu\"),\ntf.keras.layers.Dense(10)\n])\nmodel.compile(\noptimizer = tf.keras.optimizers.Adam(0.001),\nloss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\nmetrics=[tf.keras.metrics.SparseCategoricalAccuracy()]\n)\nmodel.fit(x=x_train,y=y_train,batch_size=64,epochs=10)\nmodel.evaluate(x=x_test,y=y_test)\n### Linear Regression Using Tensorflow\nlearning_rate = 0.01\ntraining_epochs = 1000\nnp.random.seed(69)\nx = np.random.random_sample(500).reshape(-1,1)\ny = x*2 + 0.5*np.random.random(500).reshape(-1,1)\n#### Training Loop\nmodel = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),\nloss='mean_absolute_error')\nmodel.fit(x,y,epochs=training_epochs,batch_size=128,verbose=False)\ny_pred = model.predict(x, verbose=False)\nplt.scatter(x,y)\nplt.plot(x, y_pred, c=\"red\")\nprint(\"Training Cost :\", model.evaluate(x,y,verbose=False))\nprint(\"Weight :\", model.get_weights()[0])\nprint(\"Bias :\", model.get_weights()[1])\n\nResult: Thus, using TensorFlow library, MLP and Linear Regression have been implemented.\n\n\nEx. No: 3\nAblation Studies\nDate: 27/7/23\n\nAim: To Perform Ablation Studies on a Neural Network Architecture.\n\nAlgorithm:\n1) Import the necessary packages.\n2) Find a Vector dataset from Open Source.\n3) Train the designed architecture with initial set of parameters\n4) Change the training methods with various hyperparameters and record the observations.\n5) Make comments about the inferences made from the table.\n\nProgram :\n### Wine Dataset\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings(\"ignore\")\ndf = pd.read_csv(\"Wine.csv\")\nprint(\"Shape :\", df.shape)\ndf.head()\ndf.info()\n### Pre Processing\n# One-Hot Quality column\ndf = pd.concat([df, pd.get_dummies(df[\"quality\"], drop_first=True)], axis=1)\ndf = df.drop([\"quality\"], axis=1)\ny = df[\"color\"]\nx = df.drop([\"color\"], axis=1)\n# Label Encode color column\nfrom sklearn.preprocessing import LabelEncoder\nlabel = LabelEncoder()\ny = label.fit_transform(df[\"color\"])\n# Train-Test Split\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,\nrandom_state=69)\n### Model Building\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Normalization\nfrom keras.layers import Dense\narchitecture = [\nSequential([Normalization(),\nDense(32, activation=\"relu\"),\nDense(16, activation=\"relu\"),\nDense(1, activation=\"sigmoid\")]),\nSequential([Normalization(),\nDense(64, activation=\"relu\"),\nDense(32, activation=\"relu\"),\nDense(16, activation=\"relu\"),\nDense(1, activation=\"sigmoid\")]),\nSequential([Normalization(),\nDense(128, activation=\"relu\"),\nDense(32, activation=\"relu\"),\nDense(1, activation=\"sigmoid\")])\n]\noptimizer = 
[keras.optimizers.Adam(learning_rate=0.01),\nkeras.optimizers.Adam(learning_rate=0.001),\nkeras.optimizers.SGD(learning_rate=0.01)]\nepochs = [5,10,20]\noutput = pd.DataFrame(columns=[\"No of Layers\",\"No of\nParams\",\"Optimizer\",\"Learning Rate\",\"Epochs\",\"Accuracy\"])\nfor arch in architecture:\nfor opti in optimizer:\nfor epoch in epochs:\nmodel = arch\nmodel.compile(optimizer=opti, loss=\"BinaryCrossentropy\",\nmetrics=\"Accuracy\")\nmodel.fit(x_train, y_train, epochs=epoch, verbose=False)\ny_pred = model.evaluate(x_test, y_test, verbose=False)\noutput = output.append(pd.DataFrame(\n{\"No of Layers\" : [len(model.layers)],\n\"No of Params\" : [model.count_params()],\n\"Optimizer\" : [opti.get_config()[\"name\"]],\n\"Learning Rate\" : [opti.learning_rate.numpy()],\n\"Epochs\" : [epoch],\n\"Accuracy\" : [y_pred[1]]}))\n\nResult: Thus, using TensorFlow library, Ablation Studies on neural network have been\nperformed.\n\n\nEx. No: 4\nRegularization Techniques and Linear Regression\nDate: 3/8/23\n\nAim: To perform Regularization Techniques and implement Linear Regression using PyTorch.\n\nAlgorithm:\n1) Import the necessary packages.\n2) Load the Dataset.\n3) Perform Pre-Processing steps on the dataset before fitting to the model.\n4) Compare the Base Model with various Regularization methods and record the\nobservations.\n5) Compare the observations\n6) Plot the Inference for better Understanding\n\nProgram :\n### Wine Dataset\nimport pandas as pd\ndf = pd.read_csv(\"Wine.csv\")\nprint(\"Shape :\", df.shape)\ndf.head()\ndf.info()\n### Pre Processing\n# One-Hot Quality column\ndf = pd.concat([df, pd.get_dummies(df[\"quality\"], drop_first=True)], axis=1)\ndf = df.drop([\"quality\"], axis=1)\ny = df[\"color\"]\nx = df.drop([\"color\"], axis=1)\n# Label Encode color column\nfrom sklearn.preprocessing import LabelEncoder\nlabel = LabelEncoder()\ny = label.fit_transform(df[\"color\"])\n# Train-Test Split\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,\nrandom_state=69)\n### Model Building\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Normalization\nfrom keras.layers import Dense, Dropout\n# Plot\nimport matplotlib.pyplot as plt\ndef plot_history(hist):\nplt.plot(hist.history['loss'], label = 'loss')\nplt.plot(hist.history['val_loss'], label='val loss')\nplt.title(\"Loss vs Val_Loss\")\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.show()\n# Logging\noutput = pd.DataFrame(columns=[\"Model\", \"Accuracy\", \"Loss\"])\n### Base Model\nmodel = Sequential([Normalization(),\nDense(64, activation=\"relu\"),\nDense(16, activation=\"relu\"),\nDense(1, activation=\"sigmoid\")])\noptimizer = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=optimizer, loss=\"BinaryCrossentropy\", metrics=\"Accuracy\")\nhist = model.fit(x_train, y_train, epochs=10, verbose=False,\nvalidation_data=(x_test,y_test))\nLoss, Accuracy = model.evaluate(x_test, y_test, verbose=False)\nprint(\"Validation Result\")\nprint(\"Loss :\", Loss)\nprint(\"Accuracy :\", Accuracy)\nplot_history(hist)\noutput = output.append(pd.DataFrame(pd.DataFrame({\"Model\":[\"Base Model\"],\n\"Accuracy\" :[Accuracy],\n\"Loss\" : [Loss]})), ignore_index=True)\n### Base Model with L1 Regularization\nmodel = Sequential([Normalization(),\nDense(64, activation=\"relu\", kernel_regularizer='l1'),\nDense(16, activation=\"relu\", kernel_regularizer='l1'),\nDense(1, activation=\"sigmoid\")])\noptimizer = 
keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=optimizer, loss=\"BinaryCrossentropy\", metrics=\"Accuracy\")\nhist = model.fit(x_train, y_train, epochs=10, verbose=False,\nvalidation_data=(x_test,y_test))\nLoss, Accuracy = model.evaluate(x_test, y_test, verbose=False)\nprint(\"Validation Result\")\nprint(\"Loss :\", Loss)\nprint(\"Accuracy :\", Accuracy)\nplot_history(hist)\noutput = output.append(pd.DataFrame(pd.DataFrame({\"Model\":[\"L1\"],\n\"Accuracy\" :[Accuracy],\n\"Loss\" : [Loss]})),\nignore_index=True)\n### Base Model with L2 Regularization\nmodel = Sequential([Normalization(),\nDense(64, activation=\"relu\", kernel_regularizer='l2'),\nDense(16, activation=\"relu\", kernel_regularizer='l2'),\nDense(1, activation=\"sigmoid\")])\noptimizer = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=optimizer, loss=\"BinaryCrossentropy\", metrics=\"Accuracy\")\nhist = model.fit(x_train, y_train, epochs=10, verbose=False,\nvalidation_data=(x_test,y_test))\nLoss, Accuracy = model.evaluate(x_test, y_test, verbose=False)\nprint(\"Validation Result\")\nprint(\"Loss :\", Loss)\nprint(\"Accuracy :\", Accuracy)\nplot_history(hist)\noutput = output.append(pd.DataFrame(pd.DataFrame({\"Model\":[\"L2\"],\n\"Accuracy\" :[Accuracy],\n\"Loss\" : [Loss]})),\nignore_index=True)\n### Base Model with Dropout\nmodel = Sequential([Normalization(),\nDense(64, activation=\"relu\"),\nDropout(0.2),\nDense(16, activation=\"relu\"),\nDropout(0.5),\nDense(1, activation=\"sigmoid\")])\noptimizer = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=optimizer, loss=\"BinaryCrossentropy\", metrics=\"Accuracy\")\nhist = model.fit(x_train, y_train, epochs=10, verbose=False,\nvalidation_data=(x_test,y_test))\nLoss, Accuracy = model.evaluate(x_test, y_test, verbose=False)\nprint(\"Validation Result\")\nprint(\"Loss :\", Loss)\nprint(\"Accuracy :\", Accuracy)\nplot_history(hist)\noutput = output.append(pd.DataFrame(pd.DataFrame({\"Model\":[\"Dropout\"],\n\"Accuracy\" :[Accuracy],\n\"Loss\" : [Loss]})),\nignore_index=True)\n### Base Model with Early Stopping\nmodel = Sequential([Normalization(),\nDense(64, activation=\"relu\"),\nDense(16, activation=\"relu\"),\nDense(1, activation=\"sigmoid\")])\noptimizer = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer=optimizer, loss=\"BinaryCrossentropy\", metrics=\"Accuracy\")\ncallback = keras.callbacks.EarlyStopping(monitor='loss', patience=5)\nhist = model.fit(x_train, y_train, epochs=100, callbacks=[callback],\nverbose=False, validation_data=(x_test,y_test))\nLoss, Accuracy = model.evaluate(x_test, y_test, verbose=False)\nprint(\"Validation Result\")\nprint(\"Loss :\", Loss)\nprint(\"Accuracy :\", Accuracy)\nplot_history(hist)\noutput = output.append(pd.DataFrame(pd.DataFrame({\"Model\":[\"Early Stopping\"],\n\"Accuracy\" :[Accuracy],\n\"Loss\" : [Loss]})),\nignore_index=True)\n### Linear Regression\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nx = torch.arange(-5,5,0.2).view(-1,1)\ny = x*0.69 + torch.rand(x.size())\n# Initialize Weights\nw = torch.randn(1, requires_grad = True)\nb = torch.randn(1, requires_grad = True)\n# Training Loop\ndef predict():\nreturn (x*w + b)\ndef calc_loss(y_pred):\nreturn (((y - y_pred)** 2).sum() / 2*x.size()[0])\ndef train(epoch = 10000, lr = 0.0000001):\nglobal w, b\nfor _ in range(epoch):\n# Predicition\ny_pred = predict()\n# Loss\nloss = calc_loss(y_pred)\n# Calculate Grad\nloss.backward()\nwith torch.no_grad():\nw -= w.grad * lr\nb -= b.grad * 
lr\nw.grad.zero_()\nb.grad.zero_()\ntrain()\nprint(\"Loss :\", calc_loss(predict()))\nplt.scatter(x, y)\nplt.plot(x.detach().numpy(), predict().detach().numpy(), c=\"red\")\nplt.legend([\"Ground Truth\", \"Predict\"][::-1])\nplt.show()\n\nResult: Thus, Regularization techniques and Linear Regression have been implemented.\nEx. No: 5\nAblation Studies - CNN\nDate: 17/8/23\n\nAim: Perform Ablation Studies on a CNN Architecture.\n\nAlgorithm:\n1) Import the necessary packages.\n2) Load The Dataset.\n3) Pre-Process the datasets with the respected methods.\n4) Train an ANN and find the accuracy.\n5) Tweak the Architecture among various options.\n6) Tabulate the observations.\n7) Plot the Inference for better understanding.\n\nProgram :\nimport numpy as np\nimport pandas as pd\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom tensorflow.python import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout,\nMaxPooling2D, BatchNormalization\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nimport seaborn as sns\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\n%matplotlib inline\n## Parameters\nIMG_ROWS = 28\nIMG_COLS = 28\nNUM_CLASSES = 10\nTEST_SIZE = 0.1\nRANDOM_STATE = 2018\n#Model\nNO_EPOCHS = 150\nPATIENCE = 20\nVERBOSE = 1\nBATCH_SIZE = 512\nPATH=\"/home/ai_ds-b1/Downloads/\"\ntrain_file = PATH+\"train.csv\"\ntest_file = PATH+\"test.csv\"\ntrain_df = pd.read_csv(train_file)\ntest_df = pd.read_csv(test_file)\nprint(\"MNIST train - rows:\",train_df.shape[0],\" columns:\", train_df.shape[1])\nprint(\"MNIST test - rows:\",test_df.shape[0],\" columns:\", test_df.shape[1])\n# data preprocessing\ndef data_preprocessing(raw, hasLabel=True):\nstart_pixel = 0\nif(hasLabel):\nstart_pixel = 1\nif(hasLabel):\nout_y = np_utils.to_categorical(raw.label, NUM_CLASSES)\nelse:\nout_y = None\nnum_images = raw.shape[0]\nx_as_array = raw.values[:,start_pixel:]\nx_shaped_array = x_as_array.reshape(num_images, IMG_ROWS, IMG_COLS, 1)\nout_x = x_shaped_array / 255\nreturn out_x, out_y\n# prepare the data\nX, y = data_preprocessing(train_df)\nX_test, y_test = data_preprocessing(test_df,hasLabel=False)\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=TEST_SIZE,\nrandom_state=RANDOM_STATE)\nprint(\"MNIST train - rows:\",X_train.shape[0],\" columns:\", X_train.shape[1:4])\nprint(\"MNIST valid - rows:\",X_val.shape[0],\" columns:\", X_val.shape[1:4])\nprint(\"MNIST test - rows:\",X_test.shape[0],\" columns:\", X_test.shape[1:4])\n# Model\nmodel = Sequential()\n# Add convolution 2D\nmodel.add(Conv2D(32, kernel_size=(3, 3),activation='relu', padding=\"same\",\nkernel_initializer='he_normal',input_shape=(IMG_ROWS, IMG_COLS, 1)))\n# model.add(BatchNormalization())\nmodel.add(Conv2D(32,kernel_size=(3, 3), activation='relu'))\nmodel.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu'))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), strides=2,padding='same',\nactivation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.4))\nmodel.add(Dense(NUM_CLASSES, activation='softmax'))\n# Compile the model\nmodel.compile(loss = \"categorical_crossentropy\", optimizer=\"adam\",\nmetrics=[\"accuracy\"])\n### Inspect the model\nLet's check the model we 
initialized.\nmodel.summary()\nNO_EPOCHS = 5\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nearlystopper = EarlyStopping(monitor='loss', patience=PATIENCE, verbose=VERBOSE)\ncheckpointer = ModelCheckpoint('best_model.h5',\nmonitor='val_acc',\nverbose=VERBOSE,\nsave_best_only=True,\nsave_weights_only=True)\nhistory = model.fit(X_train, y_train,\nbatch_size=BATCH_SIZE,\nepochs=NO_EPOCHS,\nverbose=1,\nvalidation_data=(X_val, y_val),\ncallbacks=[earlystopper, checkpointer])\nprint(\"run model - predict validation set\")\nscore = model.evaluate(X_val, y_val, verbose=0)\nprint(f'Last validation loss: {score[0]}, accuracy: {score[1]}')\nscore = model_optimal.evaluate(X_val, y_val, verbose=0)\nprint(f'Best validation loss: {score[0]}, accuracy: {score[1]}')\npred_y = np.argmax(model.predict(X_val),axis=1)\ny_val = np.argmax(y_val,axis=1)\nfrom sklearn.metrics import confusion_matrix\nplt.figure(figsize=(10,10))\nsns.heatmap(confusion_matrix(y_val,pred_y),annot=True)\n\nResult: Thus, using TensorFlow library, Ablation Studies on CNN have been performed.\n\nEx. No: 6\nCNN vs ANN - MNIST\nDate: 24/8/23\n\nAim: To write a program in TensorFlow to compare results between ANN and CNN on MNIST.\n\nAlgorithm:\n1) Import the necessary packages.\n2) Load the Dataset.\n3) Pre-Process the datasets with the respected methods.\n4) Implement ANN and CNN.\n5) Compare the results.\n6) Plot the Inference.\n\nProgram :\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\n### ANN\ndata = keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = data.load_data()\nprint(x_train.shape, y_train.shape, x_test.shape, y_test.shape)\nx_train = x_train.reshape(60000, 784)\nx_test = x_test.reshape(10000, 784)\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\nx_train = scaler.fit_transform(x_train)\nx_test = scaler.transform(x_test)\nmodel = Sequential()\nmodel.add(Dense(128, activation='relu', input_shape = (784,)))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\nmetrics=['accuracy'])\nmodel.summary()\nmodel_history = model.fit(x_train, y_train, epochs=10, verbose=0)\nprint(\"Accuracy : \", model.evaluate(x_test, y_test, verbose=0)[1])\n### CNN\ndata = keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = data.load_data()\nprint(x_train.shape, y_train.shape, x_test.shape, y_test.shape)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=(28, 28,\n1)))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\nmetrics=['accuracy'])\nmodel.summary()\nmodel_history = model.fit(x_train, y_train, epochs=10, verbose=0, batch_size=512)\nprint(\"Accuracy : \", model.evaluate(x_test, y_test, verbose=0)[1])\n\nResult: Thus, using TensorFlow library, CNN and ANN have been implemented and compared.\n\n\nEx. 

Ex. No: 7
Sentiment analysis using RNN
Date: 31/8/23

Aim: To write a program in TensorFlow to perform sentiment analysis using an RNN.

Algorithm:
1) Import the necessary packages.
2) Load the IMDB dataset.
3) Pre-process the dataset.
4) Train an RNN model with the chosen set of hyper-parameters.
5) Display the accuracy.
6) Plot the accuracy metrics (see the sketch after the result).

Program :
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
(train_x, train_y), (test_x, test_y) = keras.datasets.imdb.load_data(num_words=10000)
print("Shape")
print("Train X : ", train_x.shape)
print("Train Y : ", train_y.shape)
print("Test X : ", test_x.shape)
print("Test Y : ", test_y.shape)
print("Maximum value of the word index :", max([max(i) for i in train_x]))
print("Maximum length of a review :", max([len(i) for i in train_x]))
from keras.preprocessing.sequence import pad_sequences
train_x = pad_sequences(train_x, maxlen=269)
test_x = pad_sequences(test_x, maxlen=269)
from keras.models import Sequential
rnn = Sequential()
rnn.add(keras.layers.Embedding(10000, 32, input_length=269))
rnn.add(keras.layers.SimpleRNN(16, activation="relu"))
rnn.add(keras.layers.Dense(1))
rnn.add(keras.layers.Activation("sigmoid"))
print(rnn.summary())
rnn.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=["accuracy"])
history = rnn.fit(train_x, train_y, epochs=10, batch_size=128, verbose=1)
score = rnn.evaluate(test_x, test_y, verbose=0)
print("Accuracy", score[1]*100)

Result: Thus, using TensorFlow library, an RNN has been implemented for sentiment analysis.
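
Step 6 calls for plotting the accuracy metrics, which the program does not do. A small sketch using the History object returned by rnn.fit above:

# training curves for the sentiment RNN
import matplotlib.pyplot as plt

plt.plot(history.history["accuracy"], label="train accuracy")
plt.plot(history.history["loss"], label="train loss")
plt.xlabel("Epoch")
plt.title("IMDB sentiment RNN - training curves")
plt.legend()
plt.show()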

Ex. No: 8
LSTM vs GRU
Date: 14/9/23

Aim: To load a text sequence dataset and a vector sequence dataset of your choice and apply an LSTM and a GRU model to compare the accuracy and time complexity of both models.

Algorithm:
1) Import the necessary packages.
2) Load the text sequence and vector sequence datasets.
3) Apply pre-processing techniques.
4) Train the LSTM and GRU models.
5) Compare the accuracy and time complexity (see the timing sketch after the result).

Program :
## Using Text Sequence
import pandas as pd
import tensorflow as tf
import numpy as np
Data = pd.read_csv("training.csv")
Test = pd.read_csv('test.csv')
Data.head()
Test.head()
stop_words = ['a', 'an', 'i', 'is', 'the', 'am', 'are', 'has', 'have', 'you', 'she', 'it', 'he', 'him', 'her', 'had', 'that', 'there', 'where', 'when', 'why', 'while', 'though', 'this', 'can', 'go', 'so']
def Pre(xyz):
    # tokenise each row and drop the stop words; returns one token list per row
    return [[w for w in str(t).split() if w not in stop_words] for t in xyz]
Inpt = Data['text']
out = Data['label']
Input = [i for i in Data['text']]
from sklearn.preprocessing import OneHotEncoder
OE = OneHotEncoder()
Out = OE.fit_transform(np.array(Data['label']).reshape(-1, 1)).toarray()
I1 = [i1.split() for i1 in Input]
I2 = [[i for i in j if i not in stop_words] for j in I1]
I1 = []
for i in I2:
    for j in i:
        if j not in I1:
            I1.append(j)
Size = 300
Input = pd.DataFrame(data=0, index=[i for i in range(len(I2))], columns=I1[:Size])
for i in range(len(I2)):
    for j in range(len(I2[i])):
        word = I2[i][j]
        if word in Input.columns:  # ignore words outside the 300-word vocabulary
            Input.loc[i, word] = 1
import tensorflow as tf
from keras.layers import LSTM, Dense, GRU
from keras.models import Sequential
from keras.losses import CategoricalCrossentropy
model = Sequential()
model.add(LSTM(units=100, activation='relu', input_shape=(1, Size), return_sequences=True))
model.add(Dense(Out.shape[1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.summary()
model.fit(Input.values.reshape(-1, 1, Size), Out.reshape(-1, 1, Out.shape[1]), epochs=300)
Input_test = Pre(Test['text'])
Input_t = pd.DataFrame(data=0, index=[i for i in range(len(Input_test))], columns=I1[:Size])
for i in range(len(Input_test)):
    for j in range(len(Input_test[i])):
        word = Input_test[i][j]
        if word in Input_t.columns:
            Input_t.loc[i, word] = 1
Out_test = OE.transform(np.array(Test['label']).reshape(-1, 1)).toarray()
model = Sequential()
model.add(GRU(units=100, activation='relu', input_shape=(1, Size), return_sequences=True))
model.add(Dense(Out.shape[1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.summary()
model.fit(Input.values.reshape(-1, 1, Size), Out.reshape(-1, 1, Out.shape[1]), epochs=300)
## Using Vector Sequence
from sklearn.datasets import load_digits
data = load_digits()
inp = data['data']
inp.shape
out_vec = OE.fit_transform(data['target'].reshape(-1, 1)).toarray()
model = Sequential()
model.add(LSTM(units=100, activation='relu', input_shape=(1, inp.shape[-1]), return_sequences=True))
model.add(Dense(out_vec.shape[-1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.fit(inp.reshape(inp.shape[0], 1, inp.shape[1]), out_vec.reshape(out_vec.shape[0], 1, out_vec.shape[1]), epochs=100)
model = Sequential()
model.add(GRU(units=100, activation='relu', input_shape=(1, inp.shape[-1]), return_sequences=True))
model.add(Dense(out_vec.shape[-1], activation='softmax'))
model.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
model.fit(inp.reshape(inp.shape[0], 1, inp.shape[1]), out_vec.reshape(out_vec.shape[0], 1, out_vec.shape[1]), epochs=100)
## END
Result: Thus, using TensorFlow library, LSTM and GRU have been implemented and compared.
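
Step 5 asks for a comparison of accuracy and training time, which the program above never measures. A minimal sketch, reusing Size, Input and Out from the text-sequence part; build_recurrent and the epoch count here are illustrative, not part of the original record:

# time the same architecture with the recurrent layer swapped between LSTM and GRU
import time
import pandas as pd
from keras.models import Sequential
from keras.layers import LSTM, GRU, Dense
from keras.losses import CategoricalCrossentropy

def build_recurrent(layer):
    m = Sequential()
    m.add(layer(units=100, activation='relu', input_shape=(1, Size), return_sequences=True))
    m.add(Dense(Out.shape[1], activation='softmax'))
    m.compile(optimizer='adam', loss=CategoricalCrossentropy(), metrics=['accuracy'])
    return m

rows = []
for name, layer in [("LSTM", LSTM), ("GRU", GRU)]:
    m = build_recurrent(layer)
    start = time.perf_counter()
    hist = m.fit(Input.values.reshape(-1, 1, Size), Out.reshape(-1, 1, Out.shape[1]), epochs=50, verbose=0)
    elapsed = time.perf_counter() - start
    rows.append({"model": name, "train_accuracy": hist.history["accuracy"][-1], "train_time_s": elapsed})
print(pd.DataFrame(rows))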

Ex. No: 9
Text Generation using Gated Recurrent Unit Networks
Date: 21/9/23

Aim: To generate text using Gated Recurrent Unit networks.

Algorithm:
Step 1: Importing the required libraries.
Step 2: Loading the data into a string.
Step 3: Creating a mapping from each unique character in the text to a unique number.
Step 4: Pre-processing the data.
Step 5: Building the GRU network.
Step 6: Defining the helper functions used during the training of the network:
a) a helper function to sample the next character,
b) a helper function to generate text after each epoch,
c) a helper function to save the model after each epoch in which the loss decreases,
d) a helper function to reduce the learning rate each time the learning plateaus.
Step 7: Training the GRU model (take 15 epochs; the batch size is an individual decision, e.g. 128).
Step 8: Generating new and random text (a usage sketch follows the result).

Program:
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM, GRU
from keras.optimizers import RMSprop
from keras.callbacks import LambdaCallback
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
import random
import sys
with open('poems.txt', 'r') as file:
    text = file.read()
print(text)
vocabulary = sorted(list(set(text)))
char_to_indices = dict((c, i) for i, c in enumerate(vocabulary))
indices_to_char = dict((i, c) for i, c in enumerate(vocabulary))
print(vocabulary)
max_length = 100
steps = 5
sentences = []
next_chars = []
for i in range(0, len(text) - max_length, steps):
    sentences.append(text[i: i + max_length])
    next_chars.append(text[i + max_length])
X = np.zeros((len(sentences), max_length, len(vocabulary)), dtype=bool)
y = np.zeros((len(sentences), len(vocabulary)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_to_indices[char]] = 1
    y[i, char_to_indices[next_chars[i]]] = 1
model = Sequential()
model.add(GRU(128, input_shape=(max_length, len(vocabulary))))
model.add(Dense(len(vocabulary)))
model.add(Activation('softmax'))
optimizer = RMSprop(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample_index(preds, temperature=1.0):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
def on_epoch_end(epoch, logs):
    print()
    print('----- Generating text after Epoch: % d' % epoch)
    start_index = random.randint(0, len(text) - max_length - 1)
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print('----- diversity:', diversity)
        generated = ''
        sentence = text[start_index: start_index + max_length]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)
        for i in range(400):
            x_pred = np.zeros((1, max_length, len(vocabulary)))
            for t, char in enumerate(sentence):
                x_pred[0, t, char_to_indices[char]] = 1.
            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample_index(preds, diversity)
            next_char = indices_to_char[next_index]
            generated += next_char
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
filepath = "weights.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
reduce_alpha = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=1, min_lr=0.001)
callbacks = [print_callback, checkpoint, reduce_alpha]
model.fit(X, y, batch_size=128, epochs=15, callbacks=callbacks)
def generate_text(length, diversity):
    start_index = random.randint(0, len(text) - max_length - 1)
    generated = ''
    sentence = text[start_index: start_index + max_length]
    generated += sentence
    for i in range(length):
        x_pred = np.zeros((1, max_length, len(vocabulary)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_to_indices[char]] = 1.
        preds = model.predict(x_pred, verbose=0)[0]
        next_index = sample_index(preds, diversity)
        next_char = indices_to_char[next_index]
        generated += next_char
        sentence = sentence[1:] + next_char
    return generated
print(generate_text(500, 0.2))

Result: Thus, using TensorFlow library, a GRU network has been implemented to generate text.
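
Step 8 can be exercised directly with the generate_text helper defined above; the length and diversity values below are just examples.

# sample the trained model at a few temperatures; higher diversity gives more random text
for diversity in [0.2, 0.5, 1.0]:
    print("----- diversity:", diversity)
    print(generate_text(300, diversity))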

Ex. No: 10
Object Detection
Date: 28/9/23

Aim: To use a convolutional neural network (CNN) for object detection on the CIFAR-10 dataset.

Algorithm:
1) Import the necessary packages.
2) Load the CIFAR-10 dataset.
3) Pre-process the dataset.
4) Train a CNN model.
5) Display the accuracy.

Program :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train / 255
x_test = x_test / 255
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Model Building
model = keras.Sequential([
    keras.layers.Conv2D(filters=32, kernel_size=(5, 5), padding="same", activation="relu", input_shape=(32, 32, 3)),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(filters=64, kernel_size=(5, 5), padding="same", activation="relu"),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Conv2D(filters=128, kernel_size=(5, 5), padding="same", activation="relu"),
    keras.layers.MaxPooling2D((2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation="relu"),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
# Compile Model
opt = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
# Train Model
model_history = model.fit(x_train, y_train, epochs=5, batch_size=4, validation_data=(x_test, y_test))
# Accuracy
_, accuracy = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy :", accuracy)
model.summary()

Result: Thus, using TensorFlow library, a CNN has been implemented for object detection.
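
The evaluation above reports only an overall accuracy. A short sketch that maps a few predictions to the usual CIFAR-10 class names and shows the corresponding test images; the class_names list follows the standard CIFAR-10 label order.

# inspect a handful of detections by name
import numpy as np
import matplotlib.pyplot as plt

class_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
pred = np.argmax(model.predict(x_test[:8]), axis=1)
true = np.argmax(y_test[:8], axis=1)
fig, axs = plt.subplots(2, 4, figsize=(10, 5))
for ax, img, p, t in zip(axs.ravel(), x_test[:8], pred, true):
    ax.imshow(img)
    ax.set_title(f"pred: {class_names[p]} / true: {class_names[t]}")
    ax.axis("off")
plt.tight_layout()
plt.show()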

Ex. No: 11
Fraud Detection
Date: 5/10/23

Aim: Credit card fraud detection using the creditcard.csv dataset and a random forest: print the amount details for normal transactions, plot the correlation matrix, and print the accuracy, precision, recall, F1 score and confusion matrix.

Algorithm:
1) Import the necessary packages.
2) Load the credit card dataset.
3) Pre-process the dataset.
4) Train a random forest classifier.
5) Display the accuracy, precision, recall, F1 score and confusion matrix (see the sketch after the result).

Program :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("creditcard.csv")
print("Shape :", df.shape)
df.head()
# Columns
df.columns
df.describe()
### Amount details for Normal Transaction
df[df["Class"] == 0]["Amount"].describe()
### DownSampling
df["Class"].value_counts()
from sklearn.utils import resample
zero_df = resample(df[df["Class"] == 0], n_samples=492)
sample_df = pd.concat([df[df["Class"] == 1], zero_df], ignore_index=True)
### Correlation Matrix
sample_df.corr()
### Random Forest
from sklearn.model_selection import train_test_split
x = sample_df.iloc[:, :-1].values
y = sample_df.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=69, test_size=0.2)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
### Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))

Result: Thus, using the scikit-learn library, fraud detection has been implemented.
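
The aim also asks for a plotted correlation matrix and the individual metric values; a short sketch using sample_df, y_test and y_pred computed above:

# correlation matrix as a heatmap, plus the individual metrics on the held-out split
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

plt.figure(figsize=(12, 10))
sns.heatmap(sample_df.corr(), cmap="coolwarm")
plt.title("Correlation matrix (downsampled data)")
plt.show()

print("Accuracy :", accuracy_score(y_test, y_pred))
print("Precision:", precision_score(y_test, y_pred))
print("Recall   :", recall_score(y_test, y_pred))
print("F1 score :", f1_score(y_test, y_pred))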

Ex. No: 12
Autoencoder
Date: 12/10/23

Aim: To implement a vanilla autoencoder on the MNIST dataset and plot the loss vs. epoch curve for the training and validation sets.

Algorithm:
1) Import the necessary packages.
2) Load the MNIST dataset.
3) Train the autoencoder model.
4) Display the loss vs. epoch curves (see the sketch after the result).

Program :
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Input, Flatten, Reshape, LeakyReLU as LR, Activation, Dropout
from tensorflow.keras.models import Model, Sequential
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
plt.imshow(x_train[0], cmap="gray")
plt.show()
LATENT_SIZE = 32
encoder = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(512),
    LR(),
    Dropout(0.5),
    Dense(256),
    LR(),
    Dropout(0.5),
    Dense(128),
    LR(),
    Dropout(0.5),
    Dense(64),
    LR(),
    Dropout(0.5),
    Dense(LATENT_SIZE),
    LR()
])
decoder = Sequential([
    Dense(64, input_shape=(LATENT_SIZE,)),
    LR(),
    Dropout(0.5),
    Dense(128),
    LR(),
    Dropout(0.5),
    Dense(256),
    LR(),
    Dropout(0.5),
    Dense(512),
    LR(),
    Dropout(0.5),
    Dense(784),
    Activation("sigmoid"),
    Reshape((28, 28))
])
img = Input(shape=(28, 28))
latent_vector = encoder(img)
output = decoder(latent_vector)
model = Model(inputs=img, outputs=output)
model.compile("nadam", loss="binary_crossentropy")
EPOCHS = 10
for epoch in range(EPOCHS):
    fig, axs = plt.subplots(4, 4)
    rand = x_test[np.random.randint(0, 10000, 16)].reshape((4, 4, 1, 28, 28))
    display.clear_output()
    for i in range(4):
        for j in range(4):
            axs[i, j].imshow(model.predict(rand[i, j])[0], cmap="gray")
            axs[i, j].axis("off")
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.show()
    print("-----------", "EPOCH", epoch, "-----------")
    model.fit(x_train, x_train)

Result: Thus, using TensorFlow library, an autoencoder has been implemented.
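
The aim asks for loss vs. epoch curves on the training and validation sets, which the per-epoch preview loop above does not record. A minimal sketch that fits the same model with validation data and plots both curves; the epoch count and batch size are illustrative.

# track training and validation loss in one call and plot the two curves
from matplotlib import pyplot as plt

history = model.fit(x_train, x_train, epochs=10, batch_size=256, validation_data=(x_test, x_test), verbose=0)
plt.plot(history.history["loss"], label="training loss")
plt.plot(history.history["val_loss"], label="validation loss")
plt.xlabel("Epoch")
plt.ylabel("Binary cross-entropy")
plt.legend()
plt.show()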

Ex. No: 13
Anomaly Detection System
Date: 19/10/23

Aim: To develop an anomaly detection system that is trained on the normal data only but is used to identify both the normal data and the anomaly data during testing.

Algorithm:
1) Import the necessary packages.
2) Load MNIST as the normal data and Fashion-MNIST as the anomaly data.
3) Pre-process the data.
4) Train a CNN autoencoder model.
5) Test the trained model with the anomaly data and display the outputs.
6) Display the accuracy for the anomaly data (see the sketch after the result).

Program :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import datasets
digit = datasets.mnist.load_data()
fashion = datasets.fashion_mnist.load_data()
# Train-test Split - DIGIT
x_train = digit[0][0]
x_test = digit[1][0]
# Fashion - Test
fashion_test = fashion[1][0]
# DIGIT
x_train = x_train / 255
x_test = x_test / 255
x_train = x_train.reshape(len(x_train), 28, 28, 1)
x_test = x_test.reshape(len(x_test), 28, 28, 1)
# FASHION
fashion_test = fashion_test / 255
fashion_test = fashion_test.reshape(len(fashion_test), 28, 28, 1)
def plot_image(array, sample_size=5):
    index = 1
    plt.figure(figsize=(20, 4))
    for i in np.random.choice(array.shape[0], size=sample_size):
        ax = plt.subplot(2, 10, index)
        index += 1
        plt.imshow(array[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
plot_image(x_train)
plot_image(fashion_test)
### Convolutional Autoencoder
import keras
input_img = keras.Input(shape=(28, 28, 1))
x = keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = keras.layers.MaxPooling2D((2, 2), padding='same')(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = keras.layers.UpSampling2D((2, 2))(x)
x = keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = keras.layers.UpSampling2D((2, 2))(x)
x = keras.layers.Conv2D(16, (3, 3), activation='relu')(x)
x = keras.layers.UpSampling2D((2, 2))(x)
decoded = keras.layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
autoencoder.summary()
autoencoder.fit(x_train, x_train, epochs=25, batch_size=64, validation_data=(x_test, x_test))
digit_predict = autoencoder.predict(x_test)
fashion_predict = autoencoder.predict(fashion_test)
# Regenerated Image
plot_image(digit_predict)
plot_image(fashion_predict)
# Fixing Threshold
reconstruction_error_digit = []
reconstruction_error_fashion = []
for i in x_test[:100]:
    # evaluate one image at a time; add a batch dimension of 1
    error = autoencoder.evaluate(np.array([i]), np.array([i]), verbose=0)[0]
    reconstruction_error_digit.append(error)
for i in fashion_test[:100]:
    error = autoencoder.evaluate(np.array([i]), np.array([i]), verbose=0)[0]
    reconstruction_error_fashion.append(error)
reconstruction_error_digit = np.array(reconstruction_error_digit)
reconstruction_error_fashion = np.array(reconstruction_error_fashion)
threshold = [reconstruction_error_digit.mean() - 2*reconstruction_error_digit.std(), reconstruction_error_digit.mean() + 2*reconstruction_error_digit.std()]
def detect_anomaly(image):
    error = autoencoder.evaluate(np.array([image]), np.array([image]), verbose=0)[0]
    if error >= threshold[0] and error <= threshold[1]:
        print("Noice !!!")
        return 0
    else:
        print("Anomaly Detected")
        return 1
accuracy = 0
for i in fashion_test[10:20]:
    accuracy += detect_anomaly(i)
print("Anomalies detected out of 10 : ", accuracy)

Result: Thus, using TensorFlow library, an anomaly detection system has been implemented.
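
Step 6 asks for an accuracy figure, while the loop above only counts detections on ten anomalous images. A short sketch that scores the detector on a balanced batch of normal (MNIST) and anomalous (Fashion-MNIST) test images, reusing detect_anomaly from above; the sample size of 100 per class is illustrative.

# fraction of digits kept as normal and of fashion images flagged as anomalies
normal_ok = sum(1 - detect_anomaly(img) for img in x_test[:100])
anomaly_ok = sum(detect_anomaly(img) for img in fashion_test[:100])
print("Normal data accuracy  :", normal_ok / 100)
print("Anomaly data accuracy :", anomaly_ok / 100)
print("Overall accuracy      :", (normal_ok + anomaly_ok) / 200)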
"bugtrack_url": null,
"license": "",
"summary": "A small example package",
"version": "0.0.1",
"project_urls": {
"Bug Tracker": "https://github.com/pypa/sampleproject/issues",
"Homepage": "https://github.com/pypa/sampleproject"
},
"split_keywords": [],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "2345a223bf7ea7592c8d363800f5ac494306facf764286f412073d17e43524d2",
"md5": "7c33d60dc15ed5dfc4242415f3587b04",
"sha256": "5154069726315bfee67cd5e2556296b3d039edb3464dd5fef01fec07d6894e3d"
},
"downloads": -1,
"filename": "snudlhelper-0.0.1-py3-none-any.whl",
"has_sig": false,
"md5_digest": "7c33d60dc15ed5dfc4242415f3587b04",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.7",
"size": 11337,
"upload_time": "2023-11-06T21:50:22",
"upload_time_iso_8601": "2023-11-06T21:50:22.853927Z",
"url": "https://files.pythonhosted.org/packages/23/45/a223bf7ea7592c8d363800f5ac494306facf764286f412073d17e43524d2/snudlhelper-0.0.1-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "6dacf4923fea46688f1911155d6b39291297a1bdb2061330a06be5ffa07a1c70",
"md5": "f21cfff584e0fd22830f71a6ee2c1931",
"sha256": "3269b6deb179c7894a323170836077516507ef937eddc894a5d7b39597b7fbb2"
},
"downloads": -1,
"filename": "snudlhelper-0.0.1.tar.gz",
"has_sig": false,
"md5_digest": "f21cfff584e0fd22830f71a6ee2c1931",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.7",
"size": 28651,
"upload_time": "2023-11-06T21:50:24",
"upload_time_iso_8601": "2023-11-06T21:50:24.760863Z",
"url": "https://files.pythonhosted.org/packages/6d/ac/f4923fea46688f1911155d6b39291297a1bdb2061330a06be5ffa07a1c70/snudlhelper-0.0.1.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2023-11-06 21:50:24",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "pypa",
"github_project": "sampleproject",
"travis_ci": false,
"coveralls": false,
"github_actions": true,
"tox": true,
"lcname": "snudlhelper"
}