import numpy as np
# Initialize weights with small values
def initialize_weights(input_size, output_size):
    return np.random.randn(input_size, output_size) * 0.01  # Small random initial weights
# Binary output function (strict binary threshold)
def binary_output(x):
    # Strict (>) comparison so an all-zero input, whose weighted sum is
    # exactly 0, maps to 0; with no bias term, `x >= 0` would pin that
    # prediction at 1 forever.
    return np.where(x > 0, 1, 0)
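# Quick illustrative check (an addition, not in the original script):
# non-positive sums map to 0, positive sums map to 1.
assert np.array_equal(binary_output(np.array([-0.5, 0.0, 0.7])),
                      np.array([0, 0, 1]))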
# Training loop
def train_neuron(inputs, targets, epochs, learning_rate):
    input_size = len(inputs[0])    # Number of features
    output_size = len(targets[0])  # Number of outputs
    weights = initialize_weights(input_size, output_size)
    initial_learning_rate = learning_rate  # Kept for the per-epoch decay below

    # Iterate over the dataset for a fixed number of epochs
    for epoch in range(epochs):
        total_error = 0
        for i in range(len(inputs)):
            input_data = inputs[i]
            target = targets[i]

            # Forward pass: calculate the weighted sum (no activation yet)
            output = np.dot(input_data, weights)

            # Apply the binary output threshold
            predicted = binary_output(output)

            # Calculate error (absolute error for binary output)
            error = target - predicted
            total_error += np.sum(np.abs(error))  # Sum of absolute errors

            # Perceptron-style update: the hard threshold has no usable
            # gradient, so this is the perceptron learning rule rather than
            # true gradient descent. Each weights[j, k] moves by
            # learning_rate * input[j] * error[k], hence the outer product,
            # whose shape (input_size, output_size) matches `weights`.
            gradients = np.outer(input_data, error)
            weights += learning_rate * gradients  # Update weights

            # Optionally, clip the weights (not the gradients) to keep them bounded
            weights = np.clip(weights, -1, 1)

        # Optional inverse-time decay of the learning rate. Recomputing from
        # the initial rate avoids compounding the decay every epoch, which
        # would drive the rate to near zero within a few dozen epochs.
        learning_rate = initial_learning_rate / (1 + 0.01 * epoch)

        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Error: {total_error / len(inputs)}")

    return weights
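# Convenience helper (an addition, not part of the original script): apply
# trained weights to one or more input rows. It is equivalent to the inline
# binary_output(np.dot(...)) calls used after training below.
def predict(inputs, weights):
    return binary_output(np.dot(inputs, weights))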
# Input data (binary)
inputs = np.array([
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
])
# Target data (identity mapping: each target row equals its input row)
targets = np.array([
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
])
# Train the model
weights = train_neuron(inputs, targets, epochs=2000, learning_rate=0.05)
# After training, print final weights and check predictions
print("Final weights:", weights)
for i in range(len(inputs)):
    prediction = binary_output(np.dot(inputs[i], weights))
    print(f"Input: {inputs[i]}, Prediction: {prediction}")