├── README.md
├── code_01_subtraction.py
├── code_02_moons.py
├── code_03_moons_fun.py
├── code_04_use_module.py
├── code_05_L2.py
├── code_06_Dropout.py
├── code_07_Multi-sampleDropout.py
├── code_08_BN.py
├── code_09_BNdetail.py
├── code_10_CrossEntropy.py
├── code_11_skip-gram.py
├── code_12_CONV.py
├── code_13_pooling.py
├── code_14_TextCNN.py
├── code_15_rnnwordtest.py
├── code_16_AttLSTMModel.py
├── code_17_Transformer.py
├── code_18_pipline.py
├── code_19_BERTTest.py
├── code_20_GPT2Test.py
├── code_21_BERT_CH.py
├── code_22_TextCNNInterpret.py
├── code_23_GNN_BERT.py
├── code_24_BERT_PROPN.py
├── code_25_BERT_NoPUNC.py
├── code_26_RGCNDGL.py
├── code_27_spellgcn.py
├── code_28_CDial.py
├── code_29_serving.py
└── 人体阴阳与电能.txt

/README.md:
--------------------------------------------------------------------------------
 1 | 《基于BERT模型的自然语言处理实战》随书代码
 2 | 
 3 | 随书数据资源可在官网下载:https://www.aianaconda.com/index/bert
 4 | 
--------------------------------------------------------------------------------
/code_01_subtraction.py:
--------------------------------------------------------------------------------
 1 | # -*- coding: utf-8 -*-
 2 | """
 3 | @author: 代码医生工作室
 4 | @公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)
 5 | @来源: 配套代码
 6 | @配套代码技术支持:bbs.aianaconda.com
 7 | Created on Thu Mar 30 09:43:58 2017
 8 | """
 9 | 
10 | import copy, numpy as np
11 | np.random.seed(0) #随机数生成器的种子,可以每次得到一样的值
12 | # compute sigmoid nonlinearity
13 | def sigmoid(x): #激活函数
14 |     output = 1/(1+np.exp(-x))
15 |     return output
16 | # convert output of sigmoid function to its derivative
17 | def sigmoid_output_to_derivative(output):#激活函数的导数
18 |     return output*(1-output)
19 | 
20 | 
21 | int2binary = {} #整数到其二进制表示的映射
22 | binary_dim = 8 #暂时制作256以内的减法
23 | ## 计算0-256的二进制表示
24 | largest_number = pow(2,binary_dim)
25 | binary = np.unpackbits(
26 |     np.array([range(largest_number)],dtype=np.uint8).T,axis=1)
27 | for i in range(largest_number):
28 |     int2binary[i] = binary[i]
29 | 
30 | # input variables
31 | alpha = 0.9 #学习速率
32 
| input_dim = 2 #输入的维度是2
33 | hidden_dim = 16
34 | output_dim = 1 #输出维度为1
35 | 
36 | # initialize neural network weights
37 | synapse_0 = (2*np.random.random((input_dim,hidden_dim)) - 1)*0.05 #维度为2*16, 2是输入维度,16是隐藏层维度
38 | synapse_1 = (2*np.random.random((hidden_dim,output_dim)) - 1)*0.05
39 | synapse_h = (2*np.random.random((hidden_dim,hidden_dim)) - 1)*0.05
40 | # => [-0.05, 0.05),
41 | 
42 | # 用于存放反向传播的权重更新值
43 | synapse_0_update = np.zeros_like(synapse_0)
44 | synapse_1_update = np.zeros_like(synapse_1)
45 | synapse_h_update = np.zeros_like(synapse_h)
46 | 
47 | # training
48 | for j in range(10000):
49 | 
50 |     #生成一个数字a
51 |     a_int = np.random.randint(largest_number)
52 |     #生成一个数字b,b的最大值取的是largest_number/2,作为被减数,让它小一点。
53 |     b_int = np.random.randint(largest_number/2)
54 |     #如果生成的b大了,那么交换一下
55 |     if a_int