
McCulloch-Pitts Neuron Model
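A McCulloch-Pitts neuron takes a vector of binary inputs, multiplies it element-wise by a weight vector, sums the result, and fires only when that weighted sum reaches a threshold T: the output is 1 if the dot product of the input vector I and the weight vector W is at least T, otherwise 0. The snippets below build this up with NumPy, starting from a random binary input vector I and a random weight vector W.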

import numpy as np
np.random.seed(seed=0)
I = np.random.choice([0,1], 3)   # generate random input vector I, sampling from {0,1}
W = np.random.choice([-1,1], 3)  # generate random weight vector W, sampling from {-1,1}
print(f'Input vector:{I}, Weight vector:{W}')

Output: Input vector:[0 1 1], Weight vector:[-1 1 1]

dot = I @ W  # weighted sum: dot product of input and weight vectors
print(f'Dot product: {dot}')
Output: Dot product: 2
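For these particular vectors the weighted sum works out to 0*(-1) + 1*1 + 1*1 = 2, which is the value the threshold gate below compares against T.
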
def linear_threshold_gate(dot: int, T: float) -> int:
    '''Returns the binary threshold output'''
    if dot >= T:
        return 1
    else:
        return 0
T = 1
activation = linear_threshold_gate(dot, T)
print(f'Activation: {activation}')
Output: Activation: 1
T = 3
activation = linear_threshold_gate(dot, T)
print(f'Activation: {activation}')

Output: Activation: 0
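With T = 3 the same weighted sum of 2 no longer reaches the threshold, so the neuron does not fire: raising T makes the neuron harder to activate without changing the inputs or weights.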

# matrix of all four input combinations for two binary inputs
input_table = np.array([
    [0,0], # both no
    [0,1], # one no, one yes
    [1,0], # one yes, one no
    [1,1]  # both yes
])

print(f'input table:\n{input_table}')

Output:

input table:
[[0 0]
 [0 1]
 [1 0]
 [1 1]]
# array of weights
weights = np.array([1,1])
print(f'weights: {weights}')
Output: weights: [1 1]
# weighted sum for each row of the input table
dot_products = input_table @ weights
print(f'Dot products: {dot_products}')
Output: Dot products: [0 1 1 2]
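Row by row this reads: [0,0] -> 0, [0,1] -> 1, [1,0] -> 1, [1,1] -> 2, so only the row with both inputs active reaches a weighted sum of 2.
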
T = 2  # with this threshold the neuron fires only when both inputs are 1 (logical AND)
for dot in dot_products:
    activation = linear_threshold_gate(dot, T)
    print(f'Activation: {activation}')
Output:
Activation: 0
Activation: 0
Activation: 0
Activation: 1
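As a further sketch (reusing input_table, weights and linear_threshold_gate from above, with the threshold value chosen here rather than taken from the steps above): lowering the threshold to 1 turns the very same neuron into an OR gate, because any row with at least one active input already reaches a weighted sum of 1.

T = 1  # assumed OR-gate threshold: a single active input is enough to fire
for dot in dot_products:
    activation = linear_threshold_gate(dot, T)
    print(f'Activation: {activation}')

This should print 0, 1, 1, 1: the neuron stays silent only for the [0,0] row.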
