Single Processing Unit
% Diagram of a single processing unit (one neuron): inputs x_1..x_D enter from
% the left, each edge labeled with its weight w_i; a bias input w_0 enters from
% the top-left; the unit applies activation sigma to the weighted sum z and
% emits y to the right.
% NOTE(review): the node styles `neuron` and `connection` are assumed to be
% defined in the preamble (not visible in this chunk).
\begin{tikzpicture}[scale=0.8, every node/.style={scale=0.8}]
% Single neuron diagram - centered at (3,2)
\node[neuron, minimum size=1.2cm] (neuron) at (3,2) {$y$};
% Input nodes properly positioned
\node[left] (x1) at (0,3.2) {$x_1$};
\node[left] (x2) at (0,2.4) {$x_2$};
% \vdots instead of the raw Unicode character U+22EE, which is not a valid
% math-mode token under stock pdfLaTeX.
\node[left] (xdots) at (0,1.6) {$\vdots$};
\node[left] (xD) at (0,0.8) {$x_D$};
\node[left] (bias) at (0,3.8) {$w_0$};
% Input connections with clear weight labels
\draw[connection] (x1) -- (neuron) node[pos=0.3, above] {$w_1$};
\draw[connection] (x2) -- (neuron) node[pos=0.3, above] {$w_2$};
\draw[connection] (xdots) -- (neuron);
\draw[connection] (xD) -- (neuron) node[pos=0.3, below] {$w_D$};
\draw[connection] (bias) -- (neuron) node[pos=0.35, above, sloped, font=\tiny] {bias};
% Output with clear spacing
\draw[connection] (neuron) -- (5.5,2) node[right] {$y := \sigma(z)$};
% Activation function annotation - better positioned
\node[above=0.5cm of neuron, font=\tiny] {Activation};
\node[above=0.25cm of neuron, font=\tiny] {Function, $\sigma$};
\end{tikzpicture}
{\small Single processing unit with inputs $x_1, \dots, x_D$, weights $w_1, \dots, w_D$, bias $w_0$, and activation function $\sigma$.}
Multi-Layer Perceptron
% Diagram of a 4-5-3-1 multi-layer perceptron: an input layer (x_1..x_4), two
% hidden layers, and a single output unit y, with all adjacent layers fully
% connected. Edges are drawn at low opacity so the layer structure stays
% readable; layer names sit beneath each column.
% NOTE(review): the node styles `input neuron`, `hidden neuron`,
% `output neuron`, `connection`, and `layer label` are assumed to be defined
% in the preamble (not visible in this chunk).
\begin{tikzpicture}[scale=0.7, every node/.style={scale=0.7}]
% Input layer: four labeled units stacked top-to-bottom at x=0
\foreach \idx in {1,...,4} {
\node[input neuron] (I-\idx) at (0,{4.5-\idx}) {$x_\idx$};
}
% First hidden layer: five unlabeled units at x=2.8
\foreach \idx in {1,...,5} {
\node[hidden neuron] (H1-\idx) at (2.8,{5-\idx}) {};
}
% Second hidden layer: three unlabeled units at x=5.6
\foreach \idx in {1,...,3} {
\node[hidden neuron] (H2-\idx) at (5.6,{3.5-\idx}) {};
}
% Output layer: one labeled unit at x=8.4
\node[output neuron] (O-1) at (8.4,2) {$y$};
% Dense connections between adjacent layers, faded to 25% opacity
\foreach \src in {1,...,4} {
\foreach \dst in {1,...,5} {
\draw[connection, opacity=0.25] (I-\src) -- (H1-\dst);
}
}
\foreach \src in {1,...,5} {
\foreach \dst in {1,...,3} {
\draw[connection, opacity=0.25] (H1-\src) -- (H2-\dst);
}
}
\foreach \src in {1,...,3} {
\draw[connection, opacity=0.25] (H2-\src) -- (O-1);
}
% One name per layer, placed below its column
\foreach \xpos/\layername in {0/Input, 2.8/{Hidden 1}, 5.6/{Hidden 2}, 8.4/Output} {
\node[layer label] at (\xpos,-0.8) {\layername};
}
\end{tikzpicture}
{\small Multi-layer perceptron with fully connected layers. Each connection represents a learnable weight parameter.}