import numpy as np

# Define your activation function (you mentioned no sigmoid, so this stays linear)
def activation_function(x):
    # No transformation (identity function)
    return x

# Initialize weights with small random values
def initialize_weights(input_size, output_size):
    # Small random initialization for faster convergence
    return np.random.randn(input_size, output_size) * 0.1

# Forward pass through the single linear layer
def forward_pass(input_vector, weights, biases):
    return activation_function(np.dot(input_vector, weights) + biases)
# Training function
def train_neural_network(input_data, target_data, learning_rate=0.001, max_epochs=2000, error_threshold=1e-5):
    input_size = len(input_data[0])
    output_size = len(target_data[0])

    # Initialize weights and biases
    weights = initialize_weights(input_size, output_size)
    biases = np.zeros(output_size)

    # Training loop
    for epoch in range(max_epochs):
        total_error = 0
        for i in range(len(input_data)):
            input_vector = np.asarray(input_data[i], dtype=float)
            target_vector = np.asarray(target_data[i], dtype=float)

            # Forward pass
            output = forward_pass(input_vector, weights, biases)
            error = target_vector - output
            total_error += np.sum(error ** 2)

            # Delta-rule gradient step for the single linear layer; with
            # error = target - output these are the negative loss gradients
            weight_gradient = np.outer(input_vector, error)  # w.r.t. weights
            bias_gradient = error  # w.r.t. biases

            # Gradient clipping to avoid exploding updates
            weight_gradient = np.clip(weight_gradient, -10, 10)
            bias_gradient = np.clip(bias_gradient, -10, 10)

            # Check for NaN in the gradients and skip the update if found
            if np.isnan(weight_gradient).any() or np.isnan(bias_gradient).any():
                print("NaN detected, skipping update.")
                continue

            # Update weights and biases (adding the negative loss gradient
            # moves downhill, i.e. plain gradient descent)
            weights += learning_rate * weight_gradient
            biases += learning_rate * bias_gradient

        # Early stopping: stop once the summed squared error falls below the threshold
        if total_error < error_threshold:
            print(f"Converged at epoch {epoch} with error: {total_error}")
            break

        # Optional: print status every 100 epochs
        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Error: {total_error}")

    return weights, biases
# Test the network with the provided truth tables
def test_neural_network(weights, biases, input_data):
    predictions = []
    for input_vector in input_data:
        output = forward_pass(input_vector, weights, biases)
        predictions.append(output)
    return predictions
# Define the truth tables
input_data = [
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
]
# Corresponding targets. Note: each target equals its input, so this is the
# (linearly solvable) identity mapping, not XOR.
target_data = [
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
]
# Train the neural network
weights, biases = train_neural_network(input_data, target_data, learning_rate=0.001)
# Test the neural network
predictions = test_neural_network(weights, biases, input_data)
# Display the results
for i, (input_vector, target_vector, prediction) in enumerate(zip(input_data, target_data, predictions)):
    print(f"Table {i+1}: Input: {input_vector}, Target: {target_vector}, "
          f"Prediction: {prediction}, Error: {np.abs(np.array(target_vector) - np.array(prediction))}")
Sample output from one run:

Epoch 0, Error: 30.001706882323475
Epoch 100, Error: 5.691814090306985
Epoch 200, Error: 2.6755049196225897
Epoch 300, Error: 1.2893255968426054
Epoch 400, Error: 0.6394796565970304
Epoch 500, Error: 0.32878488413761187
Epoch 600, Error: 0.1762628377505781
Epoch 700, Error: 0.09882417777673828
Epoch 800, Error: 0.057896426582148025
Epoch 900, Error: 0.03528749909466773
Epoch 1000, Error: 0.022227717355829655
Epoch 1100, Error: 0.01436559134865939
Epoch 1200, Error: 0.0094626399996172
Epoch 1300, Error: 0.0063180931826432776
Epoch 1400, Error: 0.004258313244813741
Epoch 1500, Error: 0.002888422488023718
Epoch 1600, Error: 0.0019676269958325155
Epoch 1700, Error: 0.0013441898983668644
Epoch 1800, Error: 0.0009200155534099208
Epoch 1900, Error: 0.000630474187688474
Table 1: Input: [0, 0, 0, 0], Target: [0, 0, 0, 0], Prediction: [0.00557628 0.00708622 0.0046906 0.00512291], Error: [0.00557628 0.00708622 0.0046906 0.00512291]
Table 2: Input: [0, 0, 0, 1], Target: [0, 0, 0, 1], Prediction: [0.00317719 0.00404943 0.00271049 1.0026057 ], Error: [0.00317719 0.00404943 0.00271049 0.0026057 ]
Table 3: Input: [0, 0, 1, 0], Target: [0, 0, 1, 0], Prediction: [0.00323726 0.00404888 1.00241762 0.00294885], Error: [0.00323726 0.00404888 0.00241762 0.00294885]
Table 4: Input: [0, 0, 1, 1], Target: [0, 0, 1, 1], Prediction: [8.38161393e-04 1.01208912e-03 1.00043752e+00 1.00043163e+00], Error: [0.00083816 0.00101209 0.00043752 0.00043163]
Table 5: Input: [0, 1, 0, 0], Target: [0, 1, 0, 0], Prediction: [0.00320225 1.00372398 0.00267845 0.00296315], Error: [0.00320225 0.00372398 0.00267845 0.00296315]
Table 6: Input: [0, 1, 0, 1], Target: [0, 1, 0, 1], Prediction: [8.03152844e-04 1.00068719e+00 6.98338575e-04 1.00044593e+00], Error: [0.00080315 0.00068719 0.00069834 0.00044593]
Table 7: Input: [0, 1, 1, 0], Target: [0, 1, 1, 0], Prediction: [8.63223516e-04 1.00068664e+00 1.00040547e+00 7.89081134e-04], Error: [0.00086322 0.00068664 0.00040547 0.00078908]
Table 8: Input: [0, 1, 1, 1], Target: [0, 1, 1, 1], Prediction: [-0.00153587 0.99764985 0.99842537 0.99827187], Error: [0.00153587 0.00235015 0.00157463 0.00172813]
Table 9: Input: [1, 0, 0, 0], Target: [1, 0, 0, 0], Prediction: [1.00288204 0.00405951 0.00270566 0.00296291], Error: [0.00288204 0.00405951 0.00270566 0.00296291]
Table 10: Input: [1, 0, 0, 1], Target: [1, 0, 0, 1], Prediction: [1.00048294e+00 1.02271712e-03 7.25549842e-04 1.00044570e+00], Error: [0.00048294 0.00102272 0.00072555 0.0004457 ]
Table 11: Input: [1, 0, 1, 0], Target: [1, 0, 1, 0], Prediction: [1.00054301e+00 1.02216817e-03 1.00043269e+00 7.88848089e-04], Error: [0.00054301 0.00102217 0.00043269 0.00078885]
Table 12: Input: [1, 0, 1, 1], Target: [1, 0, 1, 1], Prediction: [ 0.99814392 -0.00201462 0.99845258 0.99827163], Error: [0.00185608 0.00201462 0.00154742 0.00172837]
Table 13: Input: [1, 1, 0, 0], Target: [1, 1, 0, 0], Prediction: [1.00050801e+00 1.00069727e+00 6.93506195e-04 8.03147341e-04], Error: [0.00050801 0.00069727 0.00069351 0.00080315]
Table 14: Input: [1, 1, 0, 1], Target: [1, 1, 0, 1], Prediction: [ 0.99810891 0.99766048 -0.0012866 0.99828593], Error: [0.00189109 0.00233952 0.0012866 0.00171407]
Table 15: Input: [1, 1, 1, 0], Target: [1, 1, 1, 0], Prediction: [ 0.99816898 0.99765993 0.99842053 -0.00137092], Error: [0.00183102 0.00234007 0.00157947 0.00137092]
Table 16: Input: [1, 1, 1, 1], Target: [1, 1, 1, 1], Prediction: [0.99576988 0.99462314 0.99644043 0.99611187], Error: [0.00423012 0.00537686 0.00355957 0.00388813]
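
Since each target equals its input, the task is the identity mapping, which a single linear layer can represent exactly; true XOR is not linearly separable and would need a hidden layer plus a nonlinearity. That also means the result can be sanity-checked in closed form. Here is a minimal sketch, assuming it runs after the script above so that input_data and target_data are in scope; it uses np.linalg.lstsq to solve the same least-squares problem directly:

import numpy as np

# Closed-form least-squares solution for the same identity-mapping task.
# Augment each input with a constant 1 so the last row of the solution
# plays the role of the bias vector.
X = np.array(input_data, dtype=float)          # 16 x 4 inputs
T = np.array(target_data, dtype=float)         # 16 x 4 targets (== X here)
X_aug = np.hstack([X, np.ones((len(X), 1))])   # 16 x 5, bias column appended

solution, *_ = np.linalg.lstsq(X_aug, T, rcond=None)
W_exact, b_exact = solution[:-1], solution[-1]

# For this data the exact solution is the 4x4 identity with zero biases,
# which is what the iterative training is slowly approaching.
print(np.round(W_exact, 6))
print(np.round(b_exact, 6))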
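
As a side note, the per-sample Python loop in train_neural_network can be replaced by a full-batch update computed with one matrix multiplication per epoch. The sketch below is illustrative, not part of the original script (the name train_vectorized and the fixed seed are my own choices); it applies the same delta-rule update summed over all 16 samples:

import numpy as np

def train_vectorized(input_data, target_data, learning_rate=0.001,
                     max_epochs=2000, error_threshold=1e-5):
    # Full-batch variant of the same delta-rule update.
    X = np.array(input_data, dtype=float)
    T = np.array(target_data, dtype=float)
    rng = np.random.default_rng(0)
    weights = rng.standard_normal((X.shape[1], T.shape[1])) * 0.1
    biases = np.zeros(T.shape[1])

    for epoch in range(max_epochs):
        error = T - (X @ weights + biases)        # residuals for all samples
        total_error = np.sum(error ** 2)
        if total_error < error_threshold:
            break
        # X.T @ error equals the sum of np.outer(x_i, e_i) over the batch
        weights += learning_rate * (X.T @ error)
        biases += learning_rate * error.sum(axis=0)
    return weights, biases

On the identity-mapping data above, this should likewise converge toward the 4x4 identity matrix with near-zero biases, matching the per-sample run, while iterating in NumPy rather than in Python.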