import torch
import torch.nn.functional as F       # activation functions live here
from torch.autograd import Variable   # Variable is deprecated since PyTorch 0.4; plain tensors work too
# make some fake data to visualize
x = torch.linspace(-5, 5, 200)        # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()                 # convert to a numpy array for plotting
# a few common activation functions
y_relu = F.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()   # F.sigmoid is deprecated; use torch.sigmoid
y_tanh = torch.tanh(x).data.numpy()         # F.tanh is deprecated; use torch.tanh
y_softplus = F.softplus(x).data.numpy()     # softplus(x) = log(1 + e^x), a smooth ReLU
# y_softmax = F.softmax(x, dim=0)  # softmax is special: it outputs probabilities and is
# used for classification, so it is not worth plotting as a 1-D curve
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')
plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')
plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')
plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')
plt.show()
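Since softmax is skipped in the plot above, here is a minimal sketch of what it does: it turns each row of scores into a probability distribution along the chosen dim. The 3-class logits below are made-up illustration values, not from the original code.

# minimal softmax sketch; the logits here are hypothetical illustration values
logits = torch.tensor([[1.0, 2.0, 3.0],
                       [2.0, 2.0, 2.0]])
probs = F.softmax(logits, dim=1)   # normalize across the class dimension
print(probs)                       # each row is a probability distribution
print(probs.sum(dim=1))            # tensor([1., 1.]) -- rows sum to 1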
import torch
from torch.autograd import Variable
tensor = torch.FloatTensor([[1, 2], [3, 4]])      # a plain tensor: no gradient tracking
variable = Variable(tensor, requires_grad=True)   # wrapped so autograd tracks operations on it
print(tensor)
print(variable)
t_out = torch.mean(tensor * tensor)      # computed on a plain tensor: no graph is built
v_out = torch.mean(variable * variable)
# v_out = 1/4 * sum(variable * variable)
print(t_out)
print(v_out)
v_out.backward()   # backpropagate from the scalar v_out
# d(v_out)/d(variable) = 1/4 * 2 * variable = variable / 2
print(variable.grad)
Output:

tensor([[1., 2.],
        [3., 4.]])
tensor([[1., 2.],
        [3., 4.]], requires_grad=True)
tensor(7.5000)
tensor(7.5000, grad_fn=<MeanBackward0>)
tensor([[0.5000, 1.0000],
        [1.5000, 2.0000]])
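The same gradient check works without the deprecated Variable wrapper: since PyTorch 0.4, a tensor created with requires_grad=True participates in autograd directly. A minimal sketch:

# same gradient check without Variable; assumes PyTorch >= 0.4
t = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
out = (t * t).mean()   # scalar: mean of the squared entries
out.backward()         # populates t.grad
print(t.grad)          # tensor([[0.5000, 1.0000], [1.5000, 2.0000]]) == t / 2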