import numpy as np

# Initialization of weights
def initialize_weights(input_size, output_size):
    return np.random.randn(input_size, output_size) * 0.1  # Small random weights to start near zero
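
# For reference (an illustrative note, not from the original): with the 4-bit
# data below, initialize_weights(4, 4) returns a (4, 4) matrix of small
# Gaussian values, e.g.
#   w = initialize_weights(4, 4)
#   w.shape  # (4, 4), entries typically within about +/-0.3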

# Simple activation function
def binary_output(x):
    return np.where(x >= 0.5, 1, 0)  # Threshold at 0.5
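
# A quick check of the threshold (a minimal illustrative example; the sample
# values are made up, not from the original):
#   binary_output(np.array([0.2, 0.5, 0.9]))  # -> array([0, 1, 1])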

# Training loop
def train_neuron(inputs, targets, epochs, learning_rate):
    input_size = len(inputs[0])    # Number of features
    output_size = len(targets[0])  # Number of outputs
    weights = initialize_weights(input_size, output_size)
    initial_learning_rate = learning_rate

    for epoch in range(epochs):
        total_error = 0
        for i in range(len(inputs)):
            input_data = inputs[i]
            target = targets[i]

            # Forward pass: calculate the weighted sum (without activation)
            output = np.dot(input_data, weights)

            # Apply the binary output threshold
            predicted = binary_output(output)

            # Calculate error
            error = target - predicted
            total_error += np.sum(np.abs(error))  # Sum of absolute errors

            # Perceptron-style update: the threshold is non-differentiable,
            # so this is the perceptron learning rule rather than true
            # gradient descent. np.outer(input_data, error) has shape
            # (input_size, output_size), matching `weights`; the original
            # error[:, np.newaxis] * input_data produced the transpose.
            weights += learning_rate * np.outer(input_data, error)

            # Optionally, clip the weights to keep them in a bounded range
            weights = np.clip(weights, -1, 1)

        # Inverse-time decay of the learning rate (optional). Scaling from the
        # initial rate avoids the compounding of lr *= 1 / (1 + 0.01 * epoch),
        # which would shrink the rate far faster than intended.
        learning_rate = initial_learning_rate / (1 + 0.01 * epoch)

        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Error: {total_error / len(inputs)}")

    return weights
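
# A small convenience wrapper for inference (an illustrative addition;
# `predict` is not part of the original code). It applies the trained
# weights and the binary threshold to a whole batch at once:
def predict(batch, weights):
    return binary_output(np.dot(batch, weights))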

# Input data (binary)
inputs = np.array([
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
])

# Target data (identity mapping: each target equals its input, which a
# single linear layer can learn)
targets = np.array([
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
])

# Train the model
weights = train_neuron(inputs, targets, epochs=2000, learning_rate=0.1)

# After training, print the final weights and check predictions
print("Final weights:", weights)
for i in range(len(inputs)):
    prediction = binary_output(np.dot(inputs[i], weights))
    print(f"Input: {inputs[i]}, Prediction: {prediction}")