Understanding the use of RNNs in natural language processing
import numpy as np
data = open('data/Hatty_Potter.txt', 'r', encoding='utf-8').read()  # read the training corpus as one long string
# data = open('data/希腊神话故事.txt', 'r', encoding='utf-8').read()  # alternative corpus (Greek mythology stories)
chars = list(set(data))  # unique characters in the corpus (the vocabulary)
data_size, vocab_size = len(data), len(chars)
print('data has %d chars, %d unique' % (data_size, vocab_size))
char_to_ix = {ch: i for i, ch in enumerate(chars)}  # character -> index
ix_to_char = {i: ch for i, ch in enumerate(chars)}  # index -> character
print(char_to_ix)
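# A minimal sanity check (not part of the original script): encode a short slice of the
# corpus to indices and decode it back, to confirm the two mappings are exact inverses.
sample_text = data[:20]
encoded = [char_to_ix[ch] for ch in sample_text]
decoded = ''.join(ix_to_char[i] for i in encoded)
assert decoded == sample_text
print('round-trip check passed for: %r' % sample_text)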
# hyperparameters
hidden_size = 100  # size of the hidden state vector
seq_length = 25  # number of characters per training sequence (truncated BPTT window)
learning_rate = 0.001  # smaller learning rates give smaller, more stable updates, but training takes longer
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01  # input to hidden
Whh = np.random.randn(hidden_size, hidden_size) * 0.01  # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size) * 0.01  # hidden to output
bh = np.zeros((hidden_size, 1))  # hidden bias
by = np.zeros((vocab_size, 1))  # output bias
# print('Wxh', Wxh)
# print('Whh', Whh)
# print('Why', Why)
# print('bh', bh)
# print('by', by)
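# Optional shape sanity checks (added for illustration, not part of the original script):
# Wxh maps a one-hot input of shape (vocab_size, 1) into the hidden space, Whh is square
# over the hidden state, and Why maps the hidden state back to vocabulary logits.
assert Wxh.shape == (hidden_size, vocab_size)
assert Whh.shape == (hidden_size, hidden_size)
assert Why.shape == (vocab_size, hidden_size)
assert bh.shape == (hidden_size, 1) and by.shape == (vocab_size, 1)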
def lossFunc(inputs, targets, hprev):
    """
    loss function
    :param inputs: list of integers (indices of the input characters)
    :param targets: list of integers (indices of the target characters, i.e. inputs shifted by one)
    :param hprev: Hx1 array holding the initial hidden state
    :return: the loss, gradients on the model parameters, and the last hidden state
    """
    xs, hs, ys, ps = {}, {}, {}, {}  # xs: one-hot input per step, hs: hidden states, ys: output logits, ps: next-char probabilities
    hs[-1] = np.copy(hprev)
    loss = 0  # total loss over the sequence
    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1))  # encode the t-th input in 1-of-k (one-hot) representation
        xs[t][inputs[t]] = 1  # set the position given by inputs[t] to 1
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) + bh)  # hidden state
        ys[t] = np.dot(Why, hs[t]) + by  # unnormalized log probabilities for the next char
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))  # probabilities for the next char (softmax)
        loss += -np.log(ps[t][targets[t], 0])  # cross-entropy loss
    # backward pass: gradients initialized to zero
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])  # gradient flowing back from the next time step
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1  # backprop into y (softmax + cross-entropy gradient)
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(Why.T, dy) + dhnext  # backprop into h
        dhraw = (1 - hs[t] * hs[t]) * dh  # backprop through the tanh nonlinearity
        dbh += dhraw  # gradient of the hidden bias
        dWxh += np.dot(dhraw, xs[t].T)  # gradient of the input-to-hidden weights
        dWhh += np.dot(dhraw, hs[t - 1].T)  # gradient of the hidden-to-hidden weights
        dhnext = np.dot(Whh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam)  # clip to mitigate exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]
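# A rough numerical gradient check (a sketch, not part of the original script): perturb a few
# randomly chosen entries of each parameter and compare the centered finite-difference estimate
# (loss(w+eps) - loss(w-eps)) / (2*eps) against the analytic gradient returned by lossFunc.
# Useful for catching backprop bugs before training; all names below are illustrative.
def grad_check(inputs, targets, hprev, num_checks=3, eps=1e-5):
    _, dWxh, dWhh, dWhy, dbh, dby, _ = lossFunc(inputs, targets, hprev)
    for param, dparam, name in zip([Wxh, Whh, Why, bh, by],
                                   [dWxh, dWhh, dWhy, dbh, dby],
                                   ['Wxh', 'Whh', 'Why', 'bh', 'by']):
        for _ in range(num_checks):
            ix = int(np.random.randint(param.size))
            old_value = param.flat[ix]
            param.flat[ix] = old_value + eps
            loss_plus = lossFunc(inputs, targets, hprev)[0]
            param.flat[ix] = old_value - eps
            loss_minus = lossFunc(inputs, targets, hprev)[0]
            param.flat[ix] = old_value  # restore the original value
            grad_numeric = (loss_plus - loss_minus) / (2 * eps)
            grad_analytic = dparam.flat[ix]
            denom = abs(grad_numeric) + abs(grad_analytic) + 1e-12
            print('%s: analytic %e, numeric %e, relative error %e'
                  % (name, grad_analytic, grad_numeric, abs(grad_analytic - grad_numeric) / denom))
# example call (commented out):
# grad_check([char_to_ix[c] for c in data[:25]], [char_to_ix[c] for c in data[1:26]], np.zeros((hidden_size, 1)))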
# prediction: one full forward pass that generates text from the model
def sample(h, seed_ix, n):
    # create a one-hot input vector
    x = np.zeros((vocab_size, 1))
    # customize it for our seed char
    x[seed_ix] = 1
    # list to store the generated character indices
    ixes = []
    # for as many characters as we want to generate
    for t in range(n):
        h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
        y = np.dot(Why, h) + by
        p = np.exp(y) / np.sum(np.exp(y))  # softmax over the vocabulary
        ix = np.random.choice(range(vocab_size), p=p.ravel())  # sample the next char index
        x = np.zeros((vocab_size, 1))  # feed the sampled char back in as the next input
        x[ix] = 1
        ixes.append(ix)
    txt = ''.join(ix_to_char[ix] for ix in ixes)
    print('-----\n %s \n------' % (txt,))
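# Usage sketch (not in the original script): draw 100 characters from the still-untrained model,
# seeded with the first character of the corpus and a zero hidden state. The output should look
# like random characters until training has run for a while.
sample(np.zeros((hidden_size, 1)), char_to_ix[data[0]], 100)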
n, p = 0, 0  # n: iteration counter, p: position of the data pointer in the corpus
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by)  # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length  # loss at iteration 0 (uniform predictions)
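# Illustrative check (not in the original script): with the near-zero initial weights the model
# predicts roughly uniformly, so the per-sequence loss should start close to this value.
print('expected initial loss: %.4f' % smooth_loss)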
while n <= 1000 * 100:  # train for 100,000 iterations
    # reset at the start and whenever the remaining data is shorter than one sequence
    if p + seq_length + 1 >= len(data) or n == 0:
        hprev = np.zeros((hidden_size, 1))  # reset RNN memory
        p = 0  # go from the start of the data
    inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
    targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
    loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFunc(inputs, targets, hprev)
    smooth_loss = smooth_loss * 0.999 + loss * 0.001  # exponential moving average of the loss
    # sample from the model now and then
    if n % 1000 == 0:
        print('iter %d, loss: %f' % (n, smooth_loss))  # print progress
        sample(hprev, inputs[0], 200)
    # perform parameter update with Adagrad
    for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                  [dWxh, dWhh, dWhy, dbh, dby],
                                  [mWxh, mWhh, mWhy, mbh, mby]):
        mem += dparam * dparam
        param += -learning_rate * dparam / np.sqrt(mem + 1e-8)  # Adagrad update
    p += seq_length  # move data pointer
    n += 1  # iteration counter
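# Checkpointing sketch (an assumption, not part of the original script): persist the learned
# parameters with NumPy so sampling can be resumed later without retraining; the file name
# 'char_rnn_weights.npz' is arbitrary.
np.savez('char_rnn_weights.npz', Wxh=Wxh, Whh=Whh, Why=Why, bh=bh, by=by)
# To reload later:
# weights = np.load('char_rnn_weights.npz')
# Wxh, Whh, Why = weights['Wxh'], weights['Whh'], weights['Why']
# bh, by = weights['bh'], weights['by']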