diff --git a/神经网络/main.py b/神经网络/main.py
index a73ed7b..ccd8c04 100644
--- a/神经网络/main.py
+++ b/神经网络/main.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Neural network
+Neural network (multilayer perceptron)
 """
 
 # Import modules
@@ -21,6 +21,7 @@ class NeuralNetwork:
         structure: List[int],
         hidden_activate: Literal["relu"] = "relu",
         output_activate: Literal["linear", "softmax"] = "linear",
+        seed: int = 52,
         epsilon: float = 1e-9,
     ):
         """
@@ -28,6 +29,7 @@
         :param structure: network structure, e.g. [2, 10, 1] is a 2-layer network with 2 input neurons, 10 hidden-layer neurons and 1 output neuron
         :param hidden_activate: activation function of the hidden layers, defaults to relu
         :param output_activate: activation function of the output layer, defaults to linear
+        :param seed: random seed, defaults to 52
         :param epsilon: small value, defaults to 1e-9
         """
         print("Initializing neural network...", end="")
@@ -50,48 +52,69 @@
             raise RuntimeError(f"Output-layer activation function {output_activate} is not supported")
         self.output_activate = output_activate
 
-        # Number of network layers (layer 0 is defined as the input layer, layer L as the output layer (L is the number of layers), layers l=1,2,...,L-1 as hidden layers; the depth is L+1)
+        # Number of network layers (definition: layer 0 is the input layer, layers l=1,2,...,L-1 are hidden layers, layer L is the output layer (L is the number of layers); the depth is L+1)
         self.layer_counts = len(structure) - 1
 
-        self.parameters = {0: {}}
+        numpy.random.seed(seed)  # set the random seed
+
+        self.parameters = {}
         # Initialize the network parameters
         for layer_index in range(1, self.layer_counts + 1):
             # Neuron counts of the previous layer and the current layer
             previous_layer_neuron_counts, current_layer_neuron_counts = (
                 self.structure[layer_index - 1],
                 self.structure[layer_index],
             )
             self.parameters[layer_index] = {
+                "activate": (
+                    activate := (
+                        self.hidden_activate
+                        if layer_index != self.layer_counts
+                        else self.output_activate
+                    )
+                ),  # activation function
                 "weight": numpy.random.randn(
                     current_layer_neuron_counts, previous_layer_neuron_counts
                 )
-                * (
-                    numpy.sqrt(2 / previous_layer_neuron_counts)
-                    if layer_index < self.layer_counts
-                    else (
-                        numpy.sqrt(1 / previous_layer_neuron_counts)
-                        if self.output_activate == "linear"
-                        else numpy.sqrt(
-                            2
-                            / (
-                                previous_layer_neuron_counts
-                                + current_layer_neuron_counts
-                            )
-                        )
-                    )
-                ),  # weights, shape [current-layer neuron count, previous-layer neuron count], matching weighted_input = weight * input + bias; hidden layers use He initialization, the output layer uses standard Xavier initialization if its activation is linear and a modified Xavier initialization otherwise
+                * self._calculate_init_weight_scale(
+                    activate=activate,
+                    previous_layer_neuron_counts=previous_layer_neuron_counts,
+                    current_layer_neuron_counts=current_layer_neuron_counts,
+                ),  # weights, shape [current-layer neuron count, previous-layer neuron count], matching weighted_input = weight * input + bias
                 "bias": numpy.zeros((current_layer_neuron_counts, 1)),  # bias
-                "activate": (
-                    self.hidden_activate
-                    if layer_index < self.layer_counts
-                    else self.output_activate
-                ),  # activation function
             }
 
         self.epsilon = epsilon
         print("done")
 
+    def _calculate_init_weight_scale(
+        self,
+        activate: Literal["relu", "linear", "softmax"],
+        previous_layer_neuron_counts: int,
+        current_layer_neuron_counts: int,
+    ) -> numpy.floating:
+        """
+        Compute the scale factor used when initializing weights
+        :param activate: activation function
+        :param previous_layer_neuron_counts: neuron count of the previous layer
+        :param current_layer_neuron_counts: neuron count of the current layer
+        :return: scale factor used when initializing weights
+        """
+        match activate:
+            case "relu" | "linear":
+                return numpy.sqrt(
+                    2 / previous_layer_neuron_counts
+                )  # He initialization
+            case "softmax":
+                return numpy.sqrt(
+                    2 / (previous_layer_neuron_counts + current_layer_neuron_counts)
+                )  # Xavier initialization
+
     def train(
         self,
         X: numpy.ndarray,
@@ -110,7 +133,7 @@
         :return: None
         """
         print(
-            f"Starting training: target loss {target_loss}, epochs {epochs}, learning rate {learning_rate}..."
+            f"Starting to train the neural network: target loss {target_loss}, epochs {epochs}, learning rate {learning_rate}..."
         )
         if not (
             X.shape[1] == y_true.shape[1]
@@ -122,10 +145,8 @@
             raise RuntimeError(
                 "Input and true output must be arrays; the input shape must be [input neuron count, sample count], the true-output shape must be [output neuron count, sample count], and the sample counts must match"
             )
-        # Normalize the input
-        self.parameters[0]["activation"] = self._normalize(
-            input=X
-        )  # use the input as the output of the input layer
+        # Normalize the input and use it as the output of the input layer
+        self.parameters[0] = {"activation": self._normalize(input=X)}
 
         epoch = 1
         while True:
@@ -151,12 +172,6 @@
                 print(f"Epoch {epoch:6d}: loss {loss:9.3f}, continuing training...")
                 epoch += 1
 
-        for idx in numpy.random.choice(X.shape[1], size=10, replace=False):
-            y_true_val = y_true[0, idx]
-            y_pred_val = self.parameters[self.layer_counts]["activation"][0, idx]
-            error = abs(y_true_val - y_pred_val)
-            print(f"{idx:<10} {y_true_val:<15.4f} {y_pred_val:<15.4f} {error:<15.4f}")
-
     def _normalize(
         self,
         input: numpy.ndarray,
@@ -181,15 +196,15 @@
                     "weighted_input": (
                         weighted_input := numpy.dot(
                             self.parameters[layer_index]["weight"],
-                            self.parameters[layer_index - 1]["activation"],
+                            self.parameters[layer_index - 1][
+                                "activation"
+                            ],  # the previous layer's output is the current layer's input
                         )
                         + self.parameters[layer_index]["bias"]
-                    ),  # weighted input, shape [current-layer neuron count, sample count]; the previous layer's output is the current layer's input
-                    "activation": (
-                        activation := self._activate(
-                            activate=self.parameters[layer_index]["activate"],
-                            input=weighted_input,
-                        )
+                    ),  # weighted input, shape [current-layer neuron count, sample count]
+                    "activation": self._activate(
+                        activate=self.parameters[layer_index]["activate"],
+                        input=weighted_input,
                     ),  # output
                 }
             )
@@ -200,7 +215,7 @@
         input: numpy.ndarray,
     ) -> numpy.ndarray:
         """
-        Compute the input through the activation function
+        Apply the activation function
         :param activate: activation function
         :param input: input
         :return: the input after the activation function is applied, same shape as the input
@@ -228,27 +243,47 @@
         :param y_true: true output, shape [output neuron count, sample count]
         :return: loss
         """
-        return (
-            0.5
-            * numpy.mean(
-                numpy.square(y_true - self.parameters[self.layer_counts]["activation"])
-            )
-            if self.parameters[self.layer_counts]["activate"] == "linear"
-            else -1
-            * numpy.mean(
-                numpy.sum(
-                    y_true
-                    * numpy.log(
-                        numpy.clip(
-                            self.parameters[self.layer_counts]["activation"],
-                            self.epsilon,
-                            1 - self.epsilon,
-                        )
-                    ),
-                    axis=0,
-                )
-            )
-        )  # if the output-layer activation is linear the loss is based on mean squared error, otherwise on cross-entropy
+        match self.parameters[self.layer_counts]["activate"]:
+            case "linear":
+                return 0.5 * numpy.mean(
+                    numpy.square(
+                        y_true - self.parameters[self.layer_counts]["activation"]
+                    )
+                )  # if the output-layer activation is linear, the loss is 0.5 * mean squared error
+            case "softmax":
+                return numpy.mean(
+                    (
+                        numpy.max(
+                            self.parameters[self.layer_counts]["weighted_input"],
+                            axis=0,
+                            keepdims=True,
+                        )
+                        + numpy.log(
+                            numpy.sum(
+                                numpy.exp(
+                                    self.parameters[self.layer_counts]["weighted_input"]
+                                    - numpy.max(
+                                        self.parameters[self.layer_counts][
+                                            "weighted_input"
+                                        ],
+                                        axis=0,
+                                        keepdims=True,
+                                    )
+                                ),
+                                axis=0,
+                                keepdims=True,
+                            )
+                        )
+                    ).squeeze()
+                    - numpy.sum(
+                        y_true * self.parameters[self.layer_counts]["weighted_input"],
+                        axis=0,
+                    )
+                )  # if the output-layer activation is softmax, the loss is the cross-entropy, computed from the weighted input with the numerically stable log-sum-exp trick
+            case _:
+                raise RuntimeError(
+                    f"Activation function {self.parameters[self.layer_counts]['activate']} is not supported"
+                )
 
     def _backward_propagate(
         self,
@@ -259,49 +294,45 @@
         :param y_true: true output, shape [output neuron count, sample count]
         :return: None
         """
-        sample_counts = X.shape[1]  # sample count
-
-        # Gradient of the loss with respect to the output layer's weighted input
-        self.parameters[self.layer_counts]["delta_weighted_input"] = (
-            self.parameters[self.layer_counts]["activation"] - y_true
-        ) / sample_counts  # this gradient is the same for the MSE-based and cross-entropy-based losses
-
+        sample_counts = y_true.shape[1]  # sample count
         for layer_index in range(self.layer_counts, 0, -1):
             self.parameters[layer_index].update(
                 {
+                    "delta_weighted_input": (
+                        delta_weighted_input := (
+                            (
+                                self.parameters[self.layer_counts]["activation"]
+                                - y_true
+                            )
+                            / sample_counts
+                            if layer_index == self.layer_counts
+                            else numpy.dot(
+                                self.parameters[layer_index + 1]["weight"].T,
+                                self.parameters[layer_index + 1][
+                                    "delta_weighted_input"
+                                ],
+                            )
+                            * self._activate_derivative(
+                                activate=self.parameters[layer_index]["activate"],
+                                input=self.parameters[layer_index]["weighted_input"],
+                            )
+                        )
+                    ),  # gradient w.r.t. the weighted input: for the output layer it is (activation - y_true) / sample count, which holds for both linear+MSE and softmax+cross-entropy; hidden layers back-propagate through the next layer's transposed weights and the activation derivative
                     "delta_weight": numpy.dot(
-                        self.parameters[layer_index]["delta_weighted_input"],
-                        (
-                            X
-                            if layer_index == 1
-                            else self.parameters[layer_index - 1]["activation"]
-                        ).T,
+                        delta_weighted_input,
+                        (self.parameters[layer_index - 1]["activation"]).T,
                     ),  # gradient of the weights
                     "delta_bias": numpy.sum(
-                        self.parameters[layer_index]["delta_weighted_input"],
+                        delta_weighted_input,
                         axis=1,
                         keepdims=True,
                     ),  # gradient of the bias
                 }
             )
 
-            if layer_index != 1:
-                self.parameters[layer_index - 1].update(
-                    {
-                        "delta_weighted_input": numpy.dot(
-                            self.parameters[layer_index]["weight"].T,
-                            self.parameters[layer_index]["delta_weighted_input"],
-                        )
-                        * self._activate_derivative(
-                            activate=self.parameters[layer_index - 1]["activate"],
-                            input=self.parameters[layer_index - 1]["weighted_input"],
-                        ),
-                    }
-                )
-
     def _activate_derivative(
         self,
-        activate: Literal["relu"],
+        activate: Literal["relu", "linear", "softmax"],
         input: numpy.ndarray,
     ) -> numpy.ndarray:
         """
@@ -313,6 +344,14 @@
         match activate:
             case "relu":
                 return numpy.where(input > 0, 1, 0)
+            case "linear":
+                return numpy.ones_like(input)
+            case "softmax":
+                activation = self._activate(
+                    activate=activate,
+                    input=input,
+                )
+                return activation * (1 - activation)  # element-wise (diagonal) derivative; the output layer does not use it because its gradient is fused with the cross-entropy loss in _backward_propagate
+            case _:
+                raise RuntimeError(f"Activation function {activate} is not supported")
 
     def _update_parameters(self, learning_rate: float) -> None:
         """
@@ -335,21 +374,17 @@
 
 # Test code
 if __name__ == "__main__":
-    # Generate test data (regression task)
-    numpy.random.seed(42)  # set a random seed for reproducibility
+
     X = numpy.random.randn(2, 100)
-    # True function: y = 2*x1 + 3*x2 + 1 (plus noise)
+    # True function: y = 2*x1^2 + 3*x2 + 1
    y_true = 2 * X[0:1, :] ** 2 + 3 * X[1:2, :] + 1
 
     # Create and train the neural network
     neural_network = NeuralNetwork(
-        structure=[2, 16, 4, 1],  # 2 inputs, 10 hidden neurons, 1 output
+        structure=[2, 256, 128, 1],  # 2 inputs, hidden layers of 256 and 128 neurons, 1 output
     )
 
     # Train
     neural_network.train(
-        X=X, y_true=y_true, target_loss=0.001, epochs=1000, learning_rate=0.001
+        X=X, y_true=y_true, target_loss=0.05, epochs=1_000, learning_rate=0.05
     )
-
-    print(neural_network.parameters[2]["activation"])
-    print(neural_network.parameters[3]["activation"])
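
Reviewer note (not part of the patch): two standalone sanity checks for the changes above. The variable names and shapes are illustrative assumptions, not code from this repository.

The first sketch checks that the log-sum-exp cross-entropy now used in _loss agrees with the naive -sum(y_true * log(softmax(z))) form on well-scaled logits; the stable form only rewrites log(sum(exp(z))) by factoring out max(z).

    import numpy

    numpy.random.seed(52)
    z = numpy.random.randn(4, 8)  # weighted input: [output neuron count, sample count]
    y_true = numpy.eye(4)[:, numpy.random.randint(0, 4, size=8)]  # one-hot labels

    # Naive form: softmax first, then log (overflows/underflows for large |z|)
    softmax = numpy.exp(z) / numpy.sum(numpy.exp(z), axis=0, keepdims=True)
    naive = -numpy.mean(numpy.sum(y_true * numpy.log(softmax), axis=0))

    # Log-sum-exp form, as in the patched _loss
    z_max = numpy.max(z, axis=0, keepdims=True)
    log_sum_exp = z_max + numpy.log(numpy.sum(numpy.exp(z - z_max), axis=0, keepdims=True))
    stable = numpy.mean(log_sum_exp.squeeze() - numpy.sum(y_true * z, axis=0))

    assert numpy.allclose(naive, stable)

The second sketch is a central-difference probe of the rewritten backward pass. It assumes _forward_propagate() takes no arguments and that _backward_propagate and _loss accept the keyword arguments shown, which the hunks suggest but do not fully confirm; adjust the calls if the actual signatures differ.

    import numpy

    net = NeuralNetwork(structure=[2, 5, 1])
    X = numpy.random.randn(2, 20)
    y = numpy.random.randn(1, 20)
    net.parameters[0] = {"activation": X}  # feed the input directly, bypassing _normalize for this check

    net._forward_propagate()
    net._backward_propagate(X=X, y_true=y)  # analytic gradients
    analytic = net.parameters[1]["delta_weight"][0, 0]

    # Perturb one weight and recompute the loss on both sides
    eps = 1e-6
    net.parameters[1]["weight"][0, 0] += eps
    net._forward_propagate()
    loss_plus = net._loss(y_true=y)
    net.parameters[1]["weight"][0, 0] -= 2 * eps
    net._forward_propagate()
    loss_minus = net._loss(y_true=y)
    net.parameters[1]["weight"][0, 0] += eps

    numeric = (loss_plus - loss_minus) / (2 * eps)
    print(analytic, numeric)  # should agree to several decimal places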