#Implementation of OR operation using McCulloch-Pitts network.
inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
w1, w2 = 1, 1
theta = 1
def mcculloch_pitts(x1, x2):
    # Weighted sum of the inputs; the neuron fires (outputs 1) when the
    # net input reaches the threshold theta
    net = x1 * w1 + x2 * w2
    if net >= theta:
        return 1
    else:
        return 0

for x1, x2 in inputs:
    output = mcculloch_pitts(x1, x2)
    print(f"Input: ({x1}, {x2}) Output: {output}")
Input: (0, 0) Output: 0
Input: (0, 1) Output: 1
Input: (1, 0) Output: 1
Input: (1, 1) Output: 1
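# A minimal added sketch (not part of the original exercise): with the weights
# fixed at 1, the same McCulloch-Pitts neuron computes AND instead of OR
# simply by raising the threshold to theta = 2.
def mp_neuron(x1, x2, theta):
    # Fire only when the sum of inputs reaches the threshold
    return 1 if x1 + x2 >= theta else 0

for x1, x2 in inputs:
    print(f"AND({x1}, {x2}) = {mp_neuron(x1, x2, theta=2)}")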
---------------------------------------------
#Implementation of character recognition using Hebb Neural Network
x1 = [1, -1, 1, 1]
x2 = [1, 1, -1, -1]
x3 = [1, -1, 1, -1]
x4 = [1, -1, -1, -1]
target = [1, -1, -1, 1]
bias = 1
w1 = w2 = w3 = w4 = b1 = 0
for i in range(len(target)):
    # Hebb rule: the weight change is the product of input and target, dw = x * t
    w1 += x1[i] * target[i]
    w2 += x2[i] * target[i]
    w3 += x3[i] * target[i]
    w4 += x4[i] * target[i]
    b1 += bias * target[i]
print("Output Weights")
print(f"w1:{w1}\tw2:{w2}\tw3:{w3}\tw4:{w4}\tb1:{b1}")
Output Weights
w1:2	w2:0	w3:0	w4:2	b1:0
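# A quick NumPy check (an added sketch, not part of the original exercise): the
# same Hebb updates in vectorized form, reproducing the weights printed above.
import numpy as np
X = np.array([x1, x2, x3, x4])   # each row holds one feature across the 4 samples
t = np.array(target)
print("weights:", X @ t, "bias:", t.sum())   # -> weights: [2 0 0 2] bias: 0
----------------------------------------------------------------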
#Implementation of AND operation using perceptron
inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
bias = -1.5
w1, w2 = 1, 1
def AND(x1, x2):
    # Weighted sum plus bias; with bias = -1.5 both inputs must be 1 to fire
    weight_sum = x1*w1 + x2*w2 + bias
    if weight_sum > 0:
        return 1
    else:
        return 0

for x1, x2 in inputs:
    print(f"inputs:{(x1, x2)}: Output:{AND(x1, x2)}")
inputs:(0, 0): Output:0
inputs:(0, 1): Output:0
inputs:(1, 0): Output:0
inputs:(1, 1): Output:1
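# An added sketch (not part of the original exercise): instead of hand-picking
# the weights and bias, the perceptron learning rule w += lr * (target - output) * x
# can learn them from the AND truth table; with these settings it settles at
# w1=0.2, w2=0.1, bias=-0.2.
targets = [0, 0, 0, 1]
pw1, pw2, pb = 0.0, 0.0, 0.0
lr = 0.1
for _ in range(20):
    for (x1, x2), t in zip(inputs, targets):
        out = 1 if x1*pw1 + x2*pw2 + pb > 0 else 0
        pw1 += lr * (t - out) * x1
        pw2 += lr * (t - out) * x2
        pb += lr * (t - out)
print(f"learned: w1={pw1:.1f}, w2={pw2:.1f}, bias={pb:.1f}")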
--------------------------------------------------------------
#Implementation of OR operation using Adaline
inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
target = [0, 1, 1, 1]
w1, w2 = 0.1, 0.1
bias = 0.1
learning_rate = 0.1
for epoch in range(10):
    for i in range(len(inputs)):
        x1, x2 = inputs[i]
        # Adaline (delta rule): the error is taken on the raw net input,
        # not on a thresholded output
        net = w1*x1 + w2*x2 + bias
        error = target[i] - net
        w1 += learning_rate * error * x1
        w2 += learning_rate * error * x2
        bias += learning_rate * error
print("Output Weights")
print(f"w1:{w1}\tw2:{w2}\tb1:{bias}")
for x1, x2 in inputs:
    total = w1*x1 + w2*x2 + bias
    print(f"{x1} OR {x2} = {1 if total > 0.5 else 0}")
Output Weights
w1:0.37115751470707536 w2:0.39271118094647023 b1:0.3804210927869522
0 OR 0 = 0
0 OR 1 = 1
1 OR 0 = 1
1 OR 1 = 1
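# An added sketch (assumes the same inputs, targets, and learning rate as
# above): retraining from scratch while tracking the mean squared error per
# epoch shows the delta rule converging smoothly.
aw1, aw2, ab = 0.1, 0.1, 0.1
for epoch in range(10):
    sse = 0.0
    for (x1, x2), t in zip(inputs, target):
        net = aw1*x1 + aw2*x2 + ab
        error = t - net
        sse += error ** 2
        aw1 += learning_rate * error * x1
        aw2 += learning_rate * error * x2
        ab += learning_rate * error
    print(f"epoch {epoch + 1}: MSE = {sse / len(inputs):.4f}")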
------------------------------------------------------------------------
import numpy as np
# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid (for weight updates); takes the already-activated
# value, since sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z))
def sigmoid_derivative(x):
    return x * (1 - x)
# XOR input and output pairs
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
target = np.array([[0], [1], [1], [0]]) # XOR truth table
# Initialize weights randomly
np.random.seed(42) # For reproducibility
w_hidden = np.random.uniform(-1, 1, (2, 2)) # 2 input features → 2 hidden neurons
b_hidden = np.random.uniform(-1, 1, (1, 2)) # Bias for hidden neurons
w_output = np.random.uniform(-1, 1, (2, 1)) # 2 hidden neurons → 1 output neuron
b_output = np.random.uniform(-1, 1, (1, 1)) # Bias for output neuron
learning_rate = 0.5
epochs = 10000
for epoch in range(epochs):
    # Forward pass
    hidden_input = np.dot(inputs, w_hidden) + b_hidden
    hidden_output = sigmoid(hidden_input)
    final_input = np.dot(hidden_output, w_output) + b_output
    final_output = sigmoid(final_input)
    # Backpropagation: output error, then error propagated to the hidden layer
    error = target - final_output
    d_output = error * sigmoid_derivative(final_output)
    d_hidden = np.dot(d_output, w_output.T) * sigmoid_derivative(hidden_output)
    # Weight updates (the error term already carries the sign)
    w_output += np.dot(hidden_output.T, d_output) * learning_rate
    b_output += np.sum(d_output, axis=0, keepdims=True) * learning_rate
    w_hidden += np.dot(inputs.T, d_hidden) * learning_rate
    b_hidden += np.sum(d_hidden, axis=0, keepdims=True) * learning_rate
print("Final Weights:")
print(f"w_hidden:\n{w_hidden}\n")
print(f"b_hidden:\n{b_hidden}\n")
print(f"w_output:\n{w_output}\n")
print(f"b_output:\n{b_output}\n")
print("XOR Predictions:")
for i in range(len(inputs)):
    hidden_input = np.dot(inputs[i], w_hidden) + b_hidden
    hidden_output = sigmoid(hidden_input)
    final_input = np.dot(hidden_output, w_output) + b_output
    final_output = sigmoid(final_input)
    # final_output has shape (1, 1); index out the scalar before thresholding
    predicted_output = 1 if final_output[0, 0] > 0.5 else 0
    print(f"{inputs[i][0]} XOR {inputs[i][1]} = {predicted_output}")
Final Weights:
w_hidden:
[[4.70178466 6.32396024]
[4.70124355 6.32206278]]
b_hidden:
[[-7.21504084 -2.80630971]]
w_output:
[[-10.19219823]
[ 9.58479322]]
b_output:
[[-4.46643253]]
XOR Predictions:
0 XOR 0 = 0
0 XOR 1 = 1
1 XOR 0 = 1
1 XOR 1 = 0
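# An added sketch (not part of the original exercise): the per-sample
# prediction loop above can be replaced by one vectorized forward pass over
# the whole truth table, showing the raw sigmoid outputs before thresholding.
probs = sigmoid(np.dot(sigmoid(np.dot(inputs, w_hidden) + b_hidden), w_output) + b_output)
for (a, b), p in zip(inputs, probs):
    print(f"{a} XOR {b} -> {p[0]:.3f} -> {int(p[0] > 0.5)}")
---------------------------------------------------------------------------------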
#Implementation of Autoencoder.
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_test = x_train.reshape(-1, 784), x_test.reshape(-1, 784)
input_layer = keras.Input(shape=(784,))
encoded = keras.layers.Dense(32, activation="relu")(input_layer)
decoded = keras.layers.Dense(784, activation="sigmoid")(encoded)
autoencoder = keras.Model(input_layer, decoded)
autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.fit(x_train, x_train, epochs=10, batch_size=256, validation_data=(x_test, x_test))
reconstructed = autoencoder.predict(x_test[:10])
fig, axes = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
    axes[0, i].imshow(x_test[i].reshape(28, 28), cmap="gray")
    axes[0, i].axis("off")
    axes[1, i].imshow(reconstructed[i].reshape(28, 28), cmap="gray")
    axes[1, i].axis("off")
plt.show()
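# An added sketch (uses the input_layer and encoded tensors defined above):
# the trained encoder half can be pulled out as its own model to inspect the
# 32-dimensional latent codes.
encoder = keras.Model(input_layer, encoded)
codes = encoder.predict(x_test[:10])
print("latent codes shape:", codes.shape)   # expected: (10, 32)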
#Implementation of image denoising using Convolutional Autoencoder.
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
input_layer = keras.Input(shape=(28, 28, 1))
x = keras.layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input_layer)
x = keras.layers.MaxPooling2D((2, 2), padding="same")(x)
x = keras.layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = keras.layers.MaxPooling2D((2, 2), padding="same")(x)
x = keras.layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = keras.layers.UpSampling2D((2, 2))(x)
x = keras.layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = keras.layers.UpSampling2D((2, 2))(x)
output_layer = keras.layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)
autoencoder = keras.Model(input_layer, output_layer)
autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.fit(x_train_noisy, x_train, epochs=10, batch_size=256, validation_data=(x_test_noisy, x_test))
denoised_images = autoencoder.predict(x_test_noisy[:10])
fig, axes = plt.subplots(3, 10, figsize=(10, 3))
for i in range(10):
    axes[0, i].imshow(x_test[i].reshape(28, 28), cmap="gray")
    axes[0, i].axis("off")
    axes[1, i].imshow(x_test_noisy[i].reshape(28, 28), cmap="gray")
    axes[1, i].axis("off")
    axes[2, i].imshow(denoised_images[i].reshape(28, 28), cmap="gray")
    axes[2, i].axis("off")
plt.show()
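# An added sketch (not part of the original exercise): denoising quality can
# be quantified with mean squared error and PSNR against the clean test set.
denoised_all = autoencoder.predict(x_test_noisy)
mse = np.mean((x_test - denoised_all) ** 2)
psnr = 10 * np.log10(1.0 / mse)   # pixel values are in [0, 1]
print(f"MSE: {mse:.4f}  PSNR: {psnr:.2f} dB")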