I want to draw a dynamic picture of a neural network and watch the weights change and the neurons activate during learning. How can I simulate this process in Python?
More precisely: if the network shape is [1000, 300, 50], I want to draw a three-layer neural network with 1000, 300 and 50 neurons respectively, and I want the picture to reflect how saturated the neurons in each layer are at each epoch.
I do not know how to do this. Can anyone point me in the right direction?
To implement Mykhaylo's suggestion, I slightly modified Milo's code so that weights can be passed as an argument; the weight then controls the width of each line. The argument is optional, since there is no point in supplying weights for the last layer. I did all of this to visualize my solution to a neural-network exercise. I used binary weights (0 or 1), so lines with a weight of zero are not drawn at all (this keeps the picture clear).
from matplotlib import pyplot
from math import cos, sin, atan
import numpy as np


class Neuron():
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def draw(self):
        circle = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        pyplot.gca().add_patch(circle)


class Layer():
    def __init__(self, network, number_of_neurons, weights):
        self.previous_layer = self.__get_previous_layer(network)
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__intialise_neurons(number_of_neurons)
        self.weights = weights

    def __intialise_neurons(self, number_of_neurons):
        neurons = []
        x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        for iteration in range(number_of_neurons):
            neuron = Neuron(x, self.y)
            neurons.append(neuron)
            x += horizontal_distance_between_neurons
        return neurons

    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        return horizontal_distance_between_neurons * (number_of_neurons_in_widest_layer - number_of_neurons) / 2

    def __calculate_layer_y_position(self):
        if self.previous_layer:
            return self.previous_layer.y + vertical_distance_between_layers
        else:
            return 0

    def __get_previous_layer(self, network):
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_between_two_neurons(self, neuron1, neuron2, linewidth):
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        x_adjustment = neuron_radius * sin(angle)
        y_adjustment = neuron_radius * cos(angle)
        line_x_data = (neuron1.x - x_adjustment, neuron2.x + x_adjustment)
        line_y_data = (neuron1.y - y_adjustment, neuron2.y + y_adjustment)
        line = pyplot.Line2D(line_x_data, line_y_data, linewidth=linewidth)
        pyplot.gca().add_line(line)

    def draw(self):
        for this_layer_neuron_index in range(len(self.neurons)):
            neuron = self.neurons[this_layer_neuron_index]
            neuron.draw()
            if self.previous_layer:
                for previous_layer_neuron_index in range(len(self.previous_layer.neurons)):
                    previous_layer_neuron = self.previous_layer.neurons[previous_layer_neuron_index]
                    weight = self.previous_layer.weights[this_layer_neuron_index, previous_layer_neuron_index]
                    self.__line_between_two_neurons(neuron, previous_layer_neuron, weight)


class NeuralNetwork():
    def __init__(self):
        self.layers = []

    def add_layer(self, number_of_neurons, weights=None):
        layer = Layer(self, number_of_neurons, weights)
        self.layers.append(layer)

    def draw(self):
        for layer in self.layers:
            layer.draw()
        pyplot.axis('scaled')
        pyplot.show()


if __name__ == "__main__":
    vertical_distance_between_layers = 6
    horizontal_distance_between_neurons = 2
    neuron_radius = 0.5
    number_of_neurons_in_widest_layer = 4
    network = NeuralNetwork()
    # weights to convert from 10 outputs to 4 (decimal digits to their binary representation)
    weights1 = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
        [0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
        [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]])
    network.add_layer(10, weights1)
    network.add_layer(4)
    network.draw()
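The classes above only produce a static picture. As a minimal sketch of how the per-epoch animation asked about in the question could be built on top of them (this is not part of the original answer), the figure can simply be redrawn once per epoch, with that epoch's weight matrix driving the line widths. Here epoch_weights is a hypothetical list containing one 4x10 weight array per training epoch, and the module-level constants (neuron_radius and so on) are assumed to be set as in the __main__ block above:

import numpy as np
from matplotlib import pyplot

def animate_training(epoch_weights, pause_seconds=0.5):
    # epoch_weights: hypothetical list of 4x10 arrays, one per training epoch
    pyplot.ion()                                # interactive mode, so drawing does not block
    for epoch, weights in enumerate(epoch_weights):
        pyplot.clf()                            # wipe the previous epoch's drawing
        network = NeuralNetwork()
        network.add_layer(10, np.abs(weights))  # line width = |weight| of this epoch
        network.add_layer(4)
        for layer in network.layers:            # draw layers without the blocking show()
            layer.draw()
        pyplot.axis('scaled')
        pyplot.title('epoch %d' % epoch)
        pyplot.pause(pause_seconds)             # render this frame, then continue
    pyplot.ioff()
    pyplot.show()

This keeps the drawing code unchanged and only replaces the single call to network.draw() with one redraw per epoch.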
The Python library matplotlib provides methods for drawing circles and lines, and it also supports animation.
I have written some sample code to show how this can be done. My code produces a static drawing of a simple neural network in which every neuron is connected to every neuron of the previous layer. Further work is needed to animate it.
I have also made it available in a Git repository.
from matplotlib import pyplot
from math import cos, sin, atan


class Neuron():
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def draw(self):
        circle = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        pyplot.gca().add_patch(circle)


class Layer():
    def __init__(self, network, number_of_neurons):
        self.previous_layer = self.__get_previous_layer(network)
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__intialise_neurons(number_of_neurons)

    def __intialise_neurons(self, number_of_neurons):
        neurons = []
        x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        for iteration in range(number_of_neurons):
            neuron = Neuron(x, self.y)
            neurons.append(neuron)
            x += horizontal_distance_between_neurons
        return neurons

    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        return horizontal_distance_between_neurons * (number_of_neurons_in_widest_layer - number_of_neurons) / 2

    def __calculate_layer_y_position(self):
        if self.previous_layer:
            return self.previous_layer.y + vertical_distance_between_layers
        else:
            return 0

    def __get_previous_layer(self, network):
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_between_two_neurons(self, neuron1, neuron2):
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        x_adjustment = neuron_radius * sin(angle)
        y_adjustment = neuron_radius * cos(angle)
        line = pyplot.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment),
                             (neuron1.y - y_adjustment, neuron2.y + y_adjustment))
        pyplot.gca().add_line(line)

    def draw(self):
        for neuron in self.neurons:
            neuron.draw()
            if self.previous_layer:
                for previous_layer_neuron in self.previous_layer.neurons:
                    self.__line_between_two_neurons(neuron, previous_layer_neuron)


class NeuralNetwork():
    def __init__(self):
        self.layers = []

    def add_layer(self, number_of_neurons):
        layer = Layer(self, number_of_neurons)
        self.layers.append(layer)

    def draw(self):
        for layer in self.layers:
            layer.draw()
        pyplot.axis('scaled')
        pyplot.show()


if __name__ == "__main__":
    vertical_distance_between_layers = 6
    horizontal_distance_between_neurons = 2
    neuron_radius = 0.5
    number_of_neurons_in_widest_layer = 4
    network = NeuralNetwork()
    network.add_layer(3)
    network.add_layer(4)
    network.add_layer(1)
    network.draw()
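The code above draws every neuron as an empty circle. To reflect how saturated each neuron is, as the question asks, one possible extension (my own sketch, not part of this answer) is to fill each circle with a gray level taken from an activation value in [0, 1]. Here activation is a hypothetical per-neuron number supplied by whatever training code is used, and neuron_radius is the same module-level constant as in the script above:

from matplotlib import pyplot

class ShadedNeuron():
    def __init__(self, x, y, activation=0.0):
        self.x = x
        self.y = y
        self.activation = activation  # hypothetical value in [0, 1], e.g. a sigmoid output

    def draw(self):
        # darker fill = more saturated neuron; "1 - activation" keeps 0 white
        shade = str(1.0 - self.activation)  # matplotlib accepts gray levels given as strings
        circle = pyplot.Circle((self.x, self.y), radius=neuron_radius,
                               fill=True, facecolor=shade, edgecolor='black')
        pyplot.gca().add_patch(circle)

Swapping this class in for Neuron, and passing each unit's activation when the layers are initialised, would shade the drawing differently at every epoch.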
I adapted some parts of Milo's answer.
from matplotlib import pyplot
from math import cos, sin, atan


class Neuron():
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def draw(self, neuron_radius):
        circle = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        pyplot.gca().add_patch(circle)


class Layer():
    def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer):
        self.vertical_distance_between_layers = 6
        self.horizontal_distance_between_neurons = 2
        self.neuron_radius = 0.5
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.previous_layer = self.__get_previous_layer(network)
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__intialise_neurons(number_of_neurons)

    def __intialise_neurons(self, number_of_neurons):
        neurons = []
        x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        for iteration in range(number_of_neurons):
            neuron = Neuron(x, self.y)
            neurons.append(neuron)
            x += self.horizontal_distance_between_neurons
        return neurons

    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        return self.horizontal_distance_between_neurons * (self.number_of_neurons_in_widest_layer - number_of_neurons) / 2

    def __calculate_layer_y_position(self):
        if self.previous_layer:
            return self.previous_layer.y + self.vertical_distance_between_layers
        else:
            return 0

    def __get_previous_layer(self, network):
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_between_two_neurons(self, neuron1, neuron2):
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        x_adjustment = self.neuron_radius * sin(angle)
        y_adjustment = self.neuron_radius * cos(angle)
        line = pyplot.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment),
                             (neuron1.y - y_adjustment, neuron2.y + y_adjustment))
        pyplot.gca().add_line(line)

    def draw(self, layerType=0):
        for neuron in self.neurons:
            neuron.draw(self.neuron_radius)
            if self.previous_layer:
                for previous_layer_neuron in self.previous_layer.neurons:
                    self.__line_between_two_neurons(neuron, previous_layer_neuron)
        # write the layer label
        x_text = self.number_of_neurons_in_widest_layer * self.horizontal_distance_between_neurons
        if layerType == 0:
            pyplot.text(x_text, self.y, 'Input Layer', fontsize=12)
        elif layerType == -1:
            pyplot.text(x_text, self.y, 'Output Layer', fontsize=12)
        else:
            pyplot.text(x_text, self.y, 'Hidden Layer ' + str(layerType), fontsize=12)


class NeuralNetwork():
    def __init__(self, number_of_neurons_in_widest_layer):
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.layers = []
        self.layertype = 0

    def add_layer(self, number_of_neurons):
        layer = Layer(self, number_of_neurons, self.number_of_neurons_in_widest_layer)
        self.layers.append(layer)

    def draw(self):
        pyplot.figure()
        for i in range(len(self.layers)):
            layer = self.layers[i]
            if i == len(self.layers) - 1:
                i = -1
            layer.draw(i)
        pyplot.axis('scaled')
        pyplot.axis('off')
        pyplot.title('Neural Network architecture', fontsize=15)
        pyplot.show()


class DrawNN():
    def __init__(self, neural_network):
        self.neural_network = neural_network

    def draw(self):
        widest_layer = max(self.neural_network)
        network = NeuralNetwork(widest_layer)
        for l in self.neural_network:
            network.add_layer(l)
        network.draw()
Now the layers are also labelled, the axes are removed, and building the plot is easier. It is done simply with:
network = DrawNN( [2,8,8,1] )
network.draw()
This builds a network with the structure [2, 8, 8, 1].
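For the shape given in the question, the same helper can in principle be called directly, although a row of 1000 circles will be very crowded at the default spacing and radius:

network = DrawNN([1000, 300, 50])
network.draw()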