This commit is contained in:
parent
f30c0f9d14
commit
d8737aa2bd
神经网络/main.py (233 changed lines)
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Neural network
+Neural network (multilayer perceptron)
 """

 # Import modules
@@ -21,6 +21,7 @@ class NeuralNetwork:
         structure: List[int],
         hidden_activate: Literal["relu"] = "relu",
         output_activate: Literal["linear", "softmax"] = "linear",
+        seed: int = 52,
         epsilon: float = 1e-9,
     ):
         """
@@ -28,6 +29,7 @@ class NeuralNetwork:
         :param structure: network structure, e.g. [2, 10, 1] is a 2-layer network: an input layer with 2 neurons, one hidden layer with 10 neurons, and an output layer with 1 neuron
         :param hidden_activate: activation function of the hidden layers, defaults to relu
         :param output_activate: activation function of the output layer, defaults to linear
+        :param seed: random seed, defaults to 52
         :param epsilon: small constant, defaults to 1e-9
         """
         print("Initializing neural network...", end="")
@@ -50,48 +52,69 @@ class NeuralNetwork:
             raise RuntimeError(f"Output activation {output_activate} is not supported")
         self.output_activate = output_activate

-        # Number of network layers (layer 0 is the input layer, layer L is the output layer (L = number of layers), and layers l = 1, 2, ..., L-1 are the hidden layers; the depth is L+1)
+        # Number of network layers (by definition, layer 0 is the input layer, layers l = 1, 2, ..., L-1 are the hidden layers, and layer L is the output layer (L = number of layers); the depth is L+1)
         self.layer_counts = len(structure) - 1

-        self.parameters = {0: {}}
+        numpy.random.seed(seed)  # Set the random seed
+
+        self.parameters = {}
         # Initialize the network parameters
         for layer_index in range(1, self.layer_counts + 1):
-            # Neuron counts of the previous and current layers
+            # Number of neurons in the previous and current layers
             previous_layer_neuron_counts, current_layer_neuron_counts = (
                 self.structure[layer_index - 1],
                 self.structure[layer_index],
             )
             self.parameters[layer_index] = {
+                "activate": (
+                    activate := (
+                        self.hidden_activate
+                        if layer_index != self.layer_counts
+                        else self.output_activate
+                    )
+                ),  # Activation function
                 "weight": numpy.random.randn(
                     current_layer_neuron_counts, previous_layer_neuron_counts
                 )
-                * (
-                    numpy.sqrt(2 / previous_layer_neuron_counts)
-                    if layer_index < self.layer_counts
-                    else (
-                        numpy.sqrt(1 / previous_layer_neuron_counts)
-                        if self.output_activate == "linear"
-                        else numpy.sqrt(
-                            2
-                            / (
-                                previous_layer_neuron_counts
-                                + current_layer_neuron_counts
-                            )
-                        )
-                    )
-                ),  # Weights, shape [current-layer neurons, previous-layer neurons], matching weighted input = weight * input + bias. Hidden layers use He initialization; the output layer uses standard Xavier initialization if its activation is linear, otherwise improved Xavier initialization
+                * self._calculate_init_weight_scale(
+                    activate=activate,
+                    previous_layer_neuron_counts=previous_layer_neuron_counts,
+                    current_layer_neuron_counts=current_layer_neuron_counts,
+                ),  # Weights, shape [current-layer neurons, previous-layer neurons], matching weighted input = weight * input + bias
                 "bias": numpy.zeros((current_layer_neuron_counts, 1)),  # Bias
-                "activate": (
-                    self.hidden_activate
-                    if layer_index < self.layer_counts
-                    else self.output_activate
-                ),  # Activation function
             }

         self.epsilon = epsilon

         print("done")

+    def _calculate_init_weight_scale(
+        self,
+        activate: Literal["relu", "linear", "softmax"],
+        previous_layer_neuron_counts: int,
+        current_layer_neuron_counts: int,
+    ) -> numpy.floating:
+        """
+        Compute the scale factor used for weight initialization
+        :param activate: activation function
+        :param previous_layer_neuron_counts: number of neurons in the previous layer
+        :param current_layer_neuron_counts: number of neurons in the current layer
+        :return: scale factor used for weight initialization
+        """
+        match activate:
+            case "relu":
+                return numpy.sqrt(
+                    2 / previous_layer_neuron_counts
+                )  # He initialization
+            case "linear":
+                return numpy.sqrt(
+                    2 / previous_layer_neuron_counts
+                )  # He initialization
+            case "softmax":
+                return numpy.sqrt(
+                    2 / (previous_layer_neuron_counts + current_layer_neuron_counts)
+                )  # Xavier initialization
+
     def train(
         self,
         X: numpy.ndarray,
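Note on the hunk above: the inline scale expression is replaced by the named helper _calculate_init_weight_scale, which returns sqrt(2 / n_prev) (He initialization) for relu, the same He factor for linear (the old inline code used sqrt(1 / n_prev) there, so this commit changes the linear scale), and sqrt(2 / (n_prev + n_cur)) (Xavier initialization) for softmax. A minimal standalone sketch of why the He factor suits relu layers; the names init_weight_scale, n_prev, and n_cur are illustrative, not part of the commit:

import numpy

def init_weight_scale(activate: str, n_prev: int, n_cur: int) -> float:
    # Same rules as _calculate_init_weight_scale: He for relu/linear, Xavier for softmax
    if activate in ("relu", "linear"):
        return float(numpy.sqrt(2 / n_prev))  # He initialization
    if activate == "softmax":
        return float(numpy.sqrt(2 / (n_prev + n_cur)))  # Xavier initialization
    raise ValueError(f"unsupported activation: {activate}")

numpy.random.seed(0)
n_prev, n_cur = 256, 128
W = numpy.random.randn(n_cur, n_prev) * init_weight_scale("relu", n_prev, n_cur)
x = numpy.random.randn(n_prev, 1000)
z = numpy.maximum(W @ x, 0)  # relu(W x)
print((x ** 2).mean(), (z ** 2).mean())  # both close to 1: He scaling preserves signal magnitude through relu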
@@ -110,7 +133,7 @@ class NeuralNetwork:
         :return: None
         """
         print(
-            f"Starting training: target loss {target_loss}, epochs {epochs}, learning rate {learning_rate}..."
+            f"Starting to train the neural network: target loss {target_loss}, epochs {epochs}, learning rate {learning_rate}..."
         )
         if not (
             X.shape[1] == y_true.shape[1]
@@ -122,10 +145,8 @@ class NeuralNetwork:
             raise RuntimeError(
                 "Input and true output must be arrays: input shape [input neurons, sample count], true output shape [output neurons, sample count], and the sample counts must match"
             )
-        # Normalize the input
-        self.parameters[0]["activation"] = self._normalize(
-            input=X
-        )  # Use the input as the output of the input layer
+        # Normalize the input and use it as the output of the input layer
+        self.parameters[0] = {"activation": self._normalize(input=X)}

         epoch = 1
         while True:
@@ -151,12 +172,6 @@ class NeuralNetwork:
                 print(f"Epoch {epoch:6d}: loss {loss:9.3f}, continuing training...")
                 epoch += 1

-        for idx in numpy.random.choice(X.shape[1], size=10, replace=False):
-            y_true_val = y_true[0, idx]
-            y_pred_val = self.parameters[self.layer_counts]["activation"][0, idx]
-            error = abs(y_true_val - y_pred_val)
-            print(f"{idx:<10} {y_true_val:<15.4f} {y_pred_val:<15.4f} {error:<15.4f}")
-
     def _normalize(
         self,
         input: numpy.ndarray,
@@ -181,15 +196,15 @@ class NeuralNetwork:
                     "weighted_input": (
                         weighted_input := numpy.dot(
                             self.parameters[layer_index]["weight"],
-                            self.parameters[layer_index - 1]["activation"],
+                            self.parameters[layer_index - 1][
+                                "activation"
+                            ],  # Use the previous layer's output as the current layer's input
                         )
                         + self.parameters[layer_index]["bias"]
-                    ),  # Weighted input, shape [number of current-layer neurons, number of samples]; the previous layer's output is used as the current layer's input
-                    "activation": (
-                        activation := self._activate(
-                            activate=self.parameters[layer_index]["activate"],
-                            input=weighted_input,
-                        )
+                    ),  # Weighted input, shape [current-layer neurons, samples]; the previous layer's output is used as the current layer's input
+                    "activation": self._activate(
+                        activate=self.parameters[layer_index]["activate"],
+                        input=weighted_input,
                     ),  # Output
                 }
             )
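The expanded comment in this hunk makes the data-flow convention explicit: the previous layer's "activation" is the current layer's input, and the weighted input is z = W a_prev + b with weight shape [current neurons, previous neurons]. A quick shape check under that convention (variable names illustrative):

import numpy

W = numpy.random.randn(4, 3)        # [current-layer neurons, previous-layer neurons]
a_prev = numpy.random.randn(3, 10)  # [previous-layer neurons, samples]
b = numpy.zeros((4, 1))             # bias broadcasts across samples
z = numpy.dot(W, a_prev) + b
print(z.shape)                      # (4, 10): [current-layer neurons, samples]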
@@ -200,7 +215,7 @@ class NeuralNetwork:
         input: numpy.ndarray,
     ) -> numpy.ndarray:
         """
-        Compute the input according to the activation function
+        Activate
         :param activate: activation function
         :param input: input
         :return: the input after applying the activation function, same shape as the input
@@ -228,27 +243,47 @@ class NeuralNetwork:
         :param y_true: true output, shape [output neurons, samples]
         :return: loss
         """
-        return (
-            0.5
-            * numpy.mean(
-                numpy.square(y_true - self.parameters[self.layer_counts]["activation"])
-            )
-            if self.parameters[self.layer_counts]["activate"] == "linear"
-            else -1
-            * numpy.mean(
-                numpy.sum(
-                    y_true
-                    * numpy.log(
-                        numpy.clip(
-                            self.parameters[self.layer_counts]["activation"],
-                            self.epsilon,
-                            1 - self.epsilon,
-                        )
-                    ),
-                    axis=0,
-                )
-            )
-        )  # If the output layer's activation is linear the loss is based on mean squared error, otherwise on cross-entropy
+        match self.parameters[self.layer_counts]["activate"]:
+            case "linear":
+                return 0.5 * numpy.mean(
+                    numpy.square(
+                        y_true - self.parameters[self.layer_counts]["activation"]
+                    )
+                )  # If the output layer's activation is linear, the loss is 0.5 * mean squared error
+            case "softmax":
+                return numpy.mean(
+                    (
+                        numpy.max(
+                            self.parameters[self.layer_counts]["weighted_input"],
+                            axis=0,
+                            keepdims=True,
+                        )
+                        + numpy.log(
+                            numpy.sum(
+                                numpy.exp(
+                                    self.parameters[self.layer_counts]["weighted_input"]
+                                    - numpy.max(
+                                        self.parameters[self.layer_counts][
+                                            "weighted_input"
+                                        ],
+                                        axis=0,
+                                        keepdims=True,
+                                    )
+                                ),
+                                axis=0,
+                                keepdims=True,
+                            )
+                        )
+                    ).squeeze()
+                    - numpy.sum(
+                        y_true * self.parameters[self.layer_counts]["weighted_input"],
+                        axis=0,
+                    )
+                )  # If the output layer's activation is softmax, the loss is cross-entropy
+            case _:
+                raise RuntimeError(
+                    f"Activation {self.parameters[self.layer_counts]["activate"]} is not supported"
+                )

     def _backward_propagate(
         self,
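The softmax branch of the loss now computes cross-entropy directly from the logits ("weighted_input") via the log-sum-exp trick, mean(logsumexp(z) - sum(y * z)), instead of taking the log of clipped softmax outputs. The two forms agree for moderate logits, but the clipped form saturates at -log(epsilon) once the true class's probability underflows the clip, while the log-sum-exp form stays exact. A small sketch (names illustrative, not part of the commit):

import numpy

numpy.random.seed(0)
z = numpy.random.randn(5, 8) * 2                    # logits, [classes, samples]
y = numpy.eye(5)[:, numpy.random.randint(0, 5, 8)]  # one-hot labels, [classes, samples]

# Old form: softmax, then -mean(sum(y * log(clip(p))))
p = numpy.exp(z - z.max(axis=0, keepdims=True))
p /= p.sum(axis=0, keepdims=True)
old = -numpy.mean(numpy.sum(y * numpy.log(numpy.clip(p, 1e-9, 1 - 1e-9)), axis=0))

# New form: logsumexp(z) - sum(y * z), no probabilities needed
m = z.max(axis=0)
lse = m + numpy.log(numpy.sum(numpy.exp(z - m), axis=0))
new = numpy.mean(lse - numpy.sum(y * z, axis=0))

print(old, new)  # equal to numerical precision at this scale; only the new form stays exact if z is scaled up 100x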
@@ -259,49 +294,45 @@ class NeuralNetwork:
         :param y_true: true output, shape [output neurons, samples]
         :return: None
         """
-        sample_counts = X.shape[1]  # Number of samples
+        sample_counts = y_true.shape[1]  # Number of samples

-        # Gradient of the loss with respect to the output layer's weighted input
-        self.parameters[self.layer_counts]["delta_weighted_input"] = (
-            self.parameters[self.layer_counts]["activation"] - y_true
-        ) / sample_counts  # The MSE-based and cross-entropy-based losses have the same gradient with respect to the output layer's weighted input
-
         for layer_index in range(self.layer_counts, 0, -1):
             self.parameters[layer_index].update(
                 {
+                    "delta_activation": (
+                        delta_activation := (
+                            (self.parameters[self.layer_counts]["activation"] - y_true)
+                            / sample_counts
+                            if layer_index == self.layer_counts
+                            else numpy.dot(
+                                self.parameters[layer_index + 1]["weight"].T,
+                                self.parameters[layer_index + 1][
+                                    "delta_weighted_input"
+                                ],
+                            )
+                        )
+                    ),  # For the output layer, compute the output gradient directly; otherwise compute the current layer's output gradient from the next layer's transposed weights and weighted-input gradient
+                    "delta_weighted_input": (
+                        delta_weighted_input := delta_activation
+                        * self._activate_derivative(
+                            activate=self.parameters[layer_index]["activate"],
+                            input=self.parameters[layer_index]["weighted_input"],
+                        )
+                    ),  # Gradient of the weighted input
                     "delta_weight": numpy.dot(
-                        self.parameters[layer_index]["delta_weighted_input"],
-                        (
-                            X
-                            if layer_index == 1
-                            else self.parameters[layer_index - 1]["activation"]
-                        ).T,
+                        delta_weighted_input,
+                        (self.parameters[layer_index - 1]["activation"]).T,
                     ),  # Gradient of the weights
                     "delta_bias": numpy.sum(
-                        self.parameters[layer_index]["delta_weighted_input"],
+                        delta_weighted_input,
                         axis=1,
                         keepdims=True,
                     ),  # Gradient of the bias
                 }
             )

-            if layer_index != 1:
-                self.parameters[layer_index - 1].update(
-                    {
-                        "delta_weighted_input": numpy.dot(
-                            self.parameters[layer_index]["weight"].T,
-                            self.parameters[layer_index]["delta_weighted_input"],
-                        )
-                        * self._activate_derivative(
-                            activate=self.parameters[layer_index - 1]["activate"],
-                            input=self.parameters[layer_index - 1]["weighted_input"],
-                        ),
-                    }
-                )
-
     def _activate_derivative(
         self,
-        activate: Literal["relu"],
+        activate: Literal["relu", "linear", "softmax"],
         input: numpy.ndarray,
     ) -> numpy.ndarray:
         """
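The rewritten loop stores the full recursion per layer: delta_activation is (a_L - y) / m at the output and W_{l+1}.T @ delta_weighted_input_{l+1} below it, and delta_weighted_input multiplies that by the activation derivative; delta_weight and delta_bias then follow as delta_z @ a_prev.T and a per-sample sum. Two things worth flagging in review: the layer-1 weight gradient now uses parameters[0]["activation"] (the normalized input) rather than raw X, which matches the forward pass; and for a softmax output the exact cross-entropy gradient is delta_z = (a - y) / m directly, so routing the output layer through the elementwise s * (1 - s) derivative applies only the diagonal of the softmax Jacobian there (the old code used (a - y) / m for both losses). The linear path can be confirmed with a finite-difference check; this sketch is standalone and its names are illustrative:

import numpy

# Tiny relu -> linear net in the diff's convention: weight [cur, prev], data [neurons, samples]
numpy.random.seed(0)
X = numpy.random.randn(2, 20)
y = numpy.random.randn(1, 20)
W1, b1 = numpy.random.randn(3, 2), numpy.zeros((3, 1))
W2, b2 = numpy.random.randn(1, 3), numpy.zeros((1, 1))

def loss(W1):
    a1 = numpy.maximum(numpy.dot(W1, X) + b1, 0)
    a2 = numpy.dot(W2, a1) + b2
    return 0.5 * numpy.mean((y - a2) ** 2)

# Backprop, mirroring the recursion in _backward_propagate
m = y.shape[1]
z1 = numpy.dot(W1, X) + b1
a1 = numpy.maximum(z1, 0)
a2 = numpy.dot(W2, a1) + b2
delta_z2 = (a2 - y) / m               # output layer, linear: derivative is 1
delta_a1 = numpy.dot(W2.T, delta_z2)  # delta_activation of the hidden layer
delta_z1 = delta_a1 * (z1 > 0)        # times the relu derivative
dW1 = numpy.dot(delta_z1, X.T)

eps = 1e-6
E = numpy.zeros_like(W1)
E[0, 0] = eps
print(dW1[0, 0], (loss(W1 + E) - loss(W1 - E)) / (2 * eps))  # should agree closely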
@@ -313,6 +344,14 @@ class NeuralNetwork:
         match activate:
             case "relu":
                 return numpy.where(input > 0, 1, 0)
+            case "linear":
+                return numpy.ones_like(input)
+            case "softmax":
+                activation = self._activate(
+                    activate=activate,
+                    input=input,
+                )
+                return activation * (1 - activation)

     def _update_parameters(self, learning_rate: float) -> None:
         """
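The added cases fill out the derivative table: ones for linear, s * (1 - s) for softmax. Note that s * (1 - s) is only the diagonal of the softmax Jacobian ds_i/dz_j = s_i * (delta_ij - s_j); applying it elementwise drops the -s_i * s_j cross terms. A one-sample sketch of that relationship (illustrative, not part of the commit):

import numpy

def softmax(z: numpy.ndarray) -> numpy.ndarray:
    e = numpy.exp(z - z.max())
    return e / e.sum()

z = numpy.array([1.0, 2.0, 0.5])
s = softmax(z)
jacobian = numpy.diag(s) - numpy.outer(s, s)  # ds_i/dz_j = s_i * (delta_ij - s_j)
print(numpy.allclose(numpy.diag(jacobian), s * (1 - s)))  # True: the elementwise form is the diagonal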
@@ -335,21 +374,17 @@ class NeuralNetwork:

 # Test code
 if __name__ == "__main__":
-    # Generate test data (regression task)
-    numpy.random.seed(42)  # Set the random seed for reproducibility
     X = numpy.random.randn(2, 100)
-    # True function: y = 2*x1 + 3*x2 + 1 (with noise)
+    # True function: y = 2*x1 + 3*x2 + 1
     y_true = 2 * X[0:1, :] ** 2 + 3 * X[1:2, :] + 1

     # Create and train the neural network
     neural_network = NeuralNetwork(
-        structure=[2, 16, 4, 1],  # 2 inputs, 10 hidden neurons, 1 output
+        structure=[2, 256, 128, 1],  # 2 inputs, 10 hidden neurons, 1 output
     )

     # Train
     neural_network.train(
-        X=X, y_true=y_true, target_loss=0.001, epochs=1000, learning_rate=0.001
+        X=X, y_true=y_true, target_loss=0.05, epochs=1_000, learning_rate=0.05
     )

-    print(neural_network.parameters[2]["activation"])
-    print(neural_network.parameters[3]["activation"])
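With seeding moved into the constructor, reproducibility is now controlled per network through the seed parameter rather than by a separate numpy.random.seed(42) call in the test block; an explicit call would look like this (values taken from the diff; seed=52 merely restates the new default):

neural_network = NeuralNetwork(structure=[2, 256, 128, 1], seed=52)
neural_network.train(
    X=X, y_true=y_true, target_loss=0.05, epochs=1_000, learning_rate=0.05
)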