├── makefile
├── LICENSE
├── utils.h
├── README.md
├── utils.cc
├── sparse-nonneg.cc
└── sparse.cc

--------------------------------------------------------------------------------
/makefile:
--------------------------------------------------------------------------------
CC = g++
INCLUDES = -I /opt/tools/eigen-eigen-ffa86ffb5570
CFLAGS = -std=c++11 -O3 -ffast-math
LIBS = -fopenmp
SRCS = sparse.cc utils.cc
SRCS_NO = sparse-nonneg.cc utils.cc
OUTPUT = sparse.o
NONNEG = nonneg.o

make:
	$(CC) $(INCLUDES) $(CFLAGS) $(SRCS) -o $(OUTPUT) $(LIBS)
nonneg:
	$(CC) $(INCLUDES) $(CFLAGS) $(SRCS_NO) -o $(NONNEG) $(LIBS)
clean:
	$(RM) *.o *~
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2017 Manaal Faruqui

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/utils.h:
--------------------------------------------------------------------------------
#ifndef UTILS_H
#define UTILS_H

#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cmath>
#include <cassert>
#include <cstdlib>
#include <tr1/unordered_map>
#include <Eigen/Core>

using namespace Eigen;
using namespace std;

typedef Matrix<double, Dynamic, 1> Col;
typedef Matrix<double, 1, Dynamic> Row;
typedef Matrix<double, Dynamic, Dynamic> Mat;

typedef std::tr1::unordered_map<string, unsigned> mapStrUnsigned;
typedef std::tr1::unordered_map<int, Col> mapIntCol;
typedef std::tr1::unordered_map<int, Mat> mapIntMat;
typedef std::tr1::unordered_map<int, unsigned> mapIntUnsigned;
typedef std::tr1::unordered_map<unsigned, unsigned> mapUnsUns;
typedef std::tr1::unordered_map<unsigned, double> mapUnsDouble;

typedef std::tr1::unordered_map<unsigned, string> mapUnsignedStr;
typedef std::tr1::unordered_map<string, bool> mapStrBool;

vector<string> split_line(const string&, char);

void ReadVecsFromFile(const string&, mapStrUnsigned*, vector<Col>*);
void ReadVecsFromFile(const string&, mapUnsignedStr*, vector<Col>*);

void ElemwiseTanh(Col*);
void ElemwiseTanhGrad(const Col&, Col*);

void ElemwiseAndrewNsnl(Col*);
void ElemwiseAndrewNsnlGrad(const Col&, Col*);

#endif
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Sparse Coding
Manaal Faruqui, manaalfar@gmail.com

This tool implements sparse coding for converting dense word vector representations to highly sparse vectors. The implementation can be run on multiple cores in parallel with asynchronous updates. Sparsity is introduced in the word vectors using L1 regularization. For technical details please refer to Faruqui et al. (2015).

### Data you need

Word vector file. The vector file should have one word vector per line, space-delimited, as follows:

```the -1.0 2.4 -0.3 ...```

### Compile

You need to download the latest Eigen stable release from here: http://eigen.tuxfamily.org/index.php?title=Main_Page

Unzip the folder and provide its path in the makefile:

```INCLUDES = -I PATH_TO_EIGEN```

After this, execute one of the following commands:

For sparse coding: ```make```

For non-negative sparse coding: ```make nonneg```

### Running the executable

For sparse coding: ```sparse.o```

For non-negative sparse coding: ```nonneg.o```

Usage: ```./sparse.o vec_corpus factor l1_reg l2_reg num_cores outfilename```

Example: ```./sparse.o sample_vecs.txt 10 0.5 1e-5 1 out_vecs.txt```

This example would expand the vectors in sample_vecs.txt to 10 times their original length.
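
### Reading the output

The output file uses the same format as the input: one word per line followed by its (now sparse) vector, space-delimited. As a rough sketch (not part of this tool), the output vectors could be read back like this; the file name is just the one from the example above:

```
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  std::ifstream in("out_vecs.txt");  // file name from the example above
  std::string line;
  while (std::getline(in, line)) {
    std::istringstream ss(line);
    std::string word;
    ss >> word;                      // first token is the word
    std::vector<double> vec;
    double val;
    while (ss >> val) vec.push_back(val);  // remaining tokens are the vector
    std::cout << word << ": " << vec.size() << " dimensions\n";
  }
  return 0;
}
```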
### Reference

```
@InProceedings{faruqui:2015:sparse,
  author    = {Faruqui, Manaal and Tsvetkov, Yulia and Yogatama, Dani and Dyer, Chris and Smith, Noah A.},
  title     = {Sparse Overcomplete Word Vector Representations},
  booktitle = {Proceedings of ACL},
  year      = {2015},
}
```
--------------------------------------------------------------------------------
/utils.cc:
--------------------------------------------------------------------------------
#include "utils.h"

using namespace std;
using namespace Eigen;

/* TODO: try splitting over all whitespace, not just the space character */
vector<string> split_line(const string& line, char delim) {
  vector<string> words;
  stringstream ss(line);
  string item;
  while (std::getline(ss, item, delim)) {
    if (!item.empty())
      words.push_back(item);
  }
  return words;
}

void ReadVecsFromFile(const string& vec_file_name, mapStrUnsigned* t_vocab,
                      vector<Col>* word_vecs) {
  ifstream vec_file(vec_file_name.c_str());
  mapStrUnsigned& vocab = *t_vocab;
  unsigned vocab_size = 0;
  if (vec_file.is_open()) {
    string line;
    vocab.clear();
    while (getline(vec_file, line)) {
      vector<string> vector_stuff = split_line(line, ' ');
      string word = vector_stuff[0];
      Col word_vec = Col::Zero(vector_stuff.size()-1);
      for (unsigned i = 0; i < word_vec.size(); ++i)
        word_vec(i, 0) = stof(vector_stuff[i+1]);
      vocab[word] = vocab_size++;
      word_vecs->push_back(word_vec);
    }
    cerr << "Read: " << vec_file_name << endl;
    cerr << "Vocab length: " << word_vecs->size() << endl;
    cerr << "Vector length: " << (*word_vecs)[0].size() << endl << endl;
    vec_file.close();

    assert(word_vecs->size() == vocab.size());
  } else {
    cerr << "Could not open " << vec_file_name << endl;
    exit(1);
  }
}

void ReadVecsFromFile(const string& vec_file_name, mapUnsignedStr* t_vocab,
                      vector<Col>* word_vecs) {
  ifstream vec_file(vec_file_name.c_str());
  mapUnsignedStr& vocab = *t_vocab;
  unsigned vocab_size = 0;
  if (vec_file.is_open()) {
    string line;
    vocab.clear();
    while (getline(vec_file, line)) {
      vector<string> vector_stuff = split_line(line, ' ');
      string word = vector_stuff[0];
      Col word_vec = Col::Zero(vector_stuff.size()-1);
      for (unsigned i = 0; i < word_vec.size(); ++i)
        word_vec(i, 0) = stof(vector_stuff[i+1]);
      vocab[vocab_size++] = word;
      word_vecs->push_back(word_vec);
    }
    cerr << "Read: " << vec_file_name << endl;
    cerr << "Vocab length: " << word_vecs->size() << endl;
    cerr << "Vector length: " << (*word_vecs)[0].size() << endl << endl;
    vec_file.close();

    assert(word_vecs->size() == vocab.size());
  } else {
    cerr << "Could not open " << vec_file_name << endl;
    exit(1);
  }
}

void ElemwiseTanh(Col* v) {
  for (unsigned i = 0; i < v->rows(); ++i)
    (*v)(i, 0) = tanh((*v)(i, 0));
}

/* v is the vector after taking tanh() */
void ElemwiseTanhGrad(const Col& v, Col* g) {
  for (int i = 0; i < v.rows(); ++i)
    (*g)(i, 0) = 1 - pow(v(i, 0), 2);
}
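
/* ElemwiseAndrewNsnl below inverts x = y + y^3/3 for y using a fixed number
   of Newton steps: for f(y) = y + y^3/3 - x, the Newton update simplifies to
   y <- (2*y^3/3 + x) / (y^2 + 1). Since the map is odd, the iteration runs
   on |x| and the sign is restored afterwards. The derivative dy/dx =
   1 / (1 + y^2) is what ElemwiseAndrewNsnlGrad computes. */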
void ElemwiseAndrewNsnl(Col *v) {
  for (int i = 0; i < v->rows(); ++i) {
    double x = (*v)(i, 0);
    if (x) {
      bool flag = (x < 0);
      double abs_x = flag ? -x : x;  // iterate on |x|; restore the sign below
      double y_n = abs_x;
      for (unsigned k = 0; k < 12; ++k) {
        const double sq = y_n * y_n;
        y_n = (2 * sq * y_n / 3 + abs_x) / (sq + 1);
      }
      (*v)(i, 0) = flag ? -y_n : y_n;
    }
  }
}

void ElemwiseAndrewNsnlGrad(const Col& v, Col* g) {
  for (int i = 0; i < v.rows(); ++i)
    (*g)(i, 0) = 1 / (1 + pow(v(i, 0), 2));
}

double CosineSim(const Col& ci, const Col& cj) {
  return ci.dot(cj) / sqrt(ci.squaredNorm() * cj.squaredNorm());
}
--------------------------------------------------------------------------------
/sparse-nonneg.cc:
--------------------------------------------------------------------------------
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cmath>
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <algorithm>
#include <functional>
#include <tr1/unordered_map>
#include <Eigen/Core>

#include "utils.h"

#define RHO 0.95
#define EPSILON 0.000001
#define RATE 0.05

using namespace std;
using namespace Eigen;

template <typename T> int sgn(T val) {
  return (T(0) < val) - (val < T(0));
}

/* General parameters of the model */
template <typename T>
class Param {

 public:
  T var;

  void Init(const int& rows, const int& cols) {
    /* Column vectors and matrices are scaled differently at initialization. */
    if (cols == 1)
      var = (0.6 / sqrt(rows)) * T::Random(rows, 1);
    else
      var = (0.6 / sqrt(rows + cols)) * T::Random(rows, cols);
    _del_var = T::Zero(rows, cols);
    _del_grad = T::Zero(rows, cols);
    _grad_sum = T::Zero(rows, cols);
    _epsilon = EPSILON * T::Ones(rows, cols);
  }

  void AdagradUpdate(const double& rate, const T& grad) {
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    /* _epsilon guards against division by zero for coordinates whose
       accumulated squared gradient is still zero. */
    var -= rate * grad.cwiseQuotient((_del_grad + _epsilon).cwiseSqrt());
  }

  void AdagradUpdateWithL1Reg(const double& rate, const T& grad,
                              const double& l1_reg) {
    _update_num += 1;
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j) {
        double diff = abs(_grad_sum(i, j)) - _update_num * l1_reg;
        if (diff <= 0)
          var(i, j) = 0;
        else
          var(i, j) = -sgn(_grad_sum(i, j)) * rate * diff /
                      sqrt(_del_grad(i, j));
      }
    }
  }
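
  /* The update above is a dual-averaging style L1 step: a coordinate stays
     exactly zero until the magnitude of its accumulated gradient outgrows
     the accumulated L1 penalty (_update_num * l1_reg); otherwise it is set
     by soft-thresholding, scaled by the per-coordinate Adagrad rate. This is
     where the sparsity comes from. The variant below additionally clamps
     negative values to zero for non-negative sparse coding. */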
  void AdagradUpdateWithL1RegNonNeg(const double& rate, const T& grad,
                                    const double& l1_reg) {
    _update_num += 1;
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j) {
        double diff = abs(_grad_sum(i, j)) - _update_num * l1_reg;
        if (diff <= 0)
          var(i, j) = 0;
        else {
          double temp = -sgn(_grad_sum(i, j)) * rate * diff /
                        sqrt(_del_grad(i, j));
          if (temp >= 0) var(i, j) = temp;
          else var(i, j) = 0;
        }
      }
    }
  }

  void WriteToFile(ofstream& out) {
    out << var.rows() << " " << var.cols() << " ";
    for (unsigned i = 0; i < var.rows(); ++i) {
      for (unsigned j = 0; j < var.cols(); ++j)
        out << var(i, j) << " ";
    }
    out << endl;
  }

  void ReadFromFile(ifstream& in) {
    string line;
    getline(in, line);
    vector<string> data = split_line(line, ' ');
    int rows = stoi(data[0]), cols = stoi(data[1]);
    var = T::Zero(rows, cols);
    for (unsigned i = 2; i < data.size(); ++i)
      var((i-2)/cols, (i-2)%cols) = stod(data[i]);
  }

 private:
  T _del_var, _del_grad, _grad_sum;  // update/gradient accumulators
  T _epsilon;
  int _update_num = 0;
};

/* Main class definition that learns the word vectors */
class Model {

 public:
  /* The parameters of the model */
  vector<Param<Col>> atom;
  Param<Mat> dict;
  int vec_len, factor;

  Model(const int& times, const int& vector_len, const int& vocab_len) {
    vec_len = vector_len;
    factor = times;
    dict.Init(vec_len, factor * vec_len);
    /* Params initialization */
    for (int i = 0; i < vocab_len; ++i) {
      Param<Col> vec;
      vec.Init(factor * vec_len, 1);
      atom.push_back(vec);
    }
  }

  /* Note: unused; ElemwiseHardTanh is not declared in utils.h, but the
     template is never instantiated. */
  template <typename T> void NonLinearity(T* vec) { ElemwiseHardTanh(vec); }

  void PredictVector(const Col& word_vec, const int& word_index,
                     Col* pred_vec) {
    *pred_vec = dict.var * atom[word_index].var;
  }

  void UpdateParams(const int& word_index, const double& rate,
                    const Col& diff_vec, const double& l1_reg,
                    const double& l2_reg) {
    Mat dict_grad = -2 * diff_vec * atom[word_index].var.transpose() +
                    2 * l2_reg * dict.var;
    dict.AdagradUpdate(rate, dict_grad);
    Col atom_elem_grad = -2 * dict.var.transpose() * diff_vec;
    atom[word_index].AdagradUpdateWithL1RegNonNeg(rate, atom_elem_grad,
                                                  l1_reg);
  }

  void WriteVectorsToFile(const string& filename,
                          const mapUnsignedStr& vocab) {
    ofstream outfile(filename);
    if (outfile.is_open()) {
      outfile.precision(3);
      for (unsigned i = 0; i < atom.size(); ++i) {
        auto it = vocab.find(i);
        outfile << it->second << " ";
        for (unsigned j = 0; j < atom[i].var.rows(); ++j)
          outfile << atom[i].var[j] << " ";
        outfile << endl;
      }
      outfile.close();
      cerr << "\nWritten vectors to: " << filename;
    } else {
      cerr << "\nFailed to open " << filename;
    }
  }

  void WriteDictToFile(const string& filename) {
    ofstream outfile(filename);
    if (outfile.is_open()) {
      outfile.precision(3);
      dict.WriteToFile(outfile);
      outfile.close();
      cerr << "\nWritten dictionary to: " << filename;
    } else {
      cerr << "\nFailed to open " << filename;
    }
  }

};
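
/* Training objective (Faruqui et al., 2015): for each word vector x_w,
   minimize ||x_w - D * a_w||^2 + l1_reg * ||a_w||_1 + l2_reg * ||D||^2,
   where D is the vec_len x (factor * vec_len) dictionary and a_w is the
   non-negative sparse code ("atom") of word w. The gradients in
   UpdateParams above follow from the squared-error and L2 terms; the L1
   term is handled inside the Adagrad update. */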
void Train(const string& out_file, const int& factor,
           const int& cores, const double& l1_reg, const double& l2_reg,
           const vector<Col>& word_vecs, const mapUnsignedStr& vocab) {
  Model model(factor, word_vecs[0].size(), word_vecs.size());
  double avg_error = 1, prev_avg_err = 0;
  int iter = 0;
  while (iter < 20 || (avg_error > 0.05 && iter < 75 &&
                       abs(avg_error - prev_avg_err) > 0.001)) {
    iter += 1;
    cerr << "\nIteration: " << iter << endl;
    unsigned num_words = 0;
    double total_error = 0, atom_l1_norm = 0;
    /* Asynchronous (hogwild-style) updates: threads write to the shared
       parameters without locking; only the statistics are synchronized. */
    #pragma omp parallel num_threads(cores) shared(total_error, atom_l1_norm)
    #pragma omp for nowait
    for (int word_id = 0; word_id < (int) word_vecs.size(); ++word_id) {
      /* Predict the word vector and compute the reconstruction error */
      Col pred_vec;
      model.PredictVector(word_vecs[word_id], word_id, &pred_vec);
      Col diff_vec = word_vecs[word_id] - pred_vec;
      double error = diff_vec.squaredNorm();
      #pragma omp critical
      {
        total_error += error;
        num_words += 1;
        atom_l1_norm += model.atom[word_id].var.lpNorm<1>();
        cerr << num_words << "\r";
      }
      model.UpdateParams(word_id, RATE, diff_vec, l1_reg, l2_reg);
    }
    prev_avg_err = avg_error;
    avg_error = total_error / num_words;
    cerr << "\nError per example: " << avg_error;
    cerr << "\nDict L2 norm: " << model.dict.var.lpNorm<2>();
    cerr << "\nAvg Atom L1 norm: " << atom_l1_norm / num_words;
  }
  model.WriteVectorsToFile(out_file, vocab);
  model.WriteDictToFile(out_file + "_dict");
}

int main(int argc, char **argv) {
  mapUnsignedStr vocab;
  vector<Col> word_vecs;
  if (argc == 7) {
    string vec_corpus = argv[1];
    int factor = stoi(argv[2]);
    double l1_reg = stod(argv[3]), l2_reg = stod(argv[4]);
    int num_cores = stoi(argv[5]);
    string outfilename = argv[6];

    ReadVecsFromFile(vec_corpus, &vocab, &word_vecs);

    cerr << "Model specification" << endl;
    cerr << "----------------" << endl;
    cerr << "Vector length: " << word_vecs[0].size() << endl;
    cerr << "Dictionary length: " << factor * word_vecs[0].size() << endl;
    cerr << "L2 Reg (Dict): " << l2_reg << endl;
    cerr << "L1 Reg (Atom): " << l1_reg << endl;
    cerr << "Number of Cores: " << num_cores << endl;
    cerr << "----------------" << endl;

    Train(outfilename, factor, num_cores, l1_reg, l2_reg, word_vecs, vocab);
  } else {
    cerr << "Usage: " << argv[0] << " vec_corpus factor l1_reg l2_reg "
         << "num_cores outfilename\n";
    return 1;
  }
  return 0;
}
--------------------------------------------------------------------------------
/sparse.cc:
--------------------------------------------------------------------------------
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cmath>
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <algorithm>
#include <functional>
#include <tr1/unordered_map>
#include <Eigen/Core>

#include "utils.h"

#define RHO 0.95
#define EPSILON 0.000001
#define RATE 0.05

using namespace std;
using namespace Eigen;

template <typename T> int sgn(T val) {
  return (T(0) < val) - (val < T(0));
}

/* General parameters of the model */
template <typename T>
class Param {

 public:
  T var;

  void Init(const int& rows, const int& cols) {
    /* Column vectors and matrices are scaled differently at initialization. */
    if (cols == 1)
      var = (0.6 / sqrt(rows)) * T::Random(rows, 1);
    else
      var = (0.6 / sqrt(rows + cols)) * T::Random(rows, cols);
    _del_var = T::Zero(rows, cols);
    _del_grad = T::Zero(rows, cols);
    _grad_sum = T::Zero(rows, cols);
    _epsilon = EPSILON * T::Ones(rows, cols);
  }

  void AdagradUpdate(const double& rate, const T& grad) {
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    /* _epsilon guards against division by zero for coordinates whose
       accumulated squared gradient is still zero. */
    var -= rate * grad.cwiseQuotient((_del_grad + _epsilon).cwiseSqrt());
  }

  void AdagradUpdateWithL1Reg(const double& rate, const T& grad,
                              const double& l1_reg) {
    _update_num += 1;
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j) {
        double diff = abs(_grad_sum(i, j)) - _update_num * l1_reg;
        if (diff <= 0)
          var(i, j) = 0;
        else
          var(i, j) = -sgn(_grad_sum(i, j)) * rate * diff /
                      sqrt(_del_grad(i, j));
      }
    }
  }

  void AdagradUpdateWithL1RegNonNeg(const double& rate, const T& grad,
                                    const double& l1_reg) {
    _update_num += 1;
    _del_grad += grad.cwiseAbs2();
    _grad_sum += grad;
    for (int i = 0; i < var.rows(); ++i) {
      for (int j = 0; j < var.cols(); ++j) {
        double diff = abs(_grad_sum(i, j)) - _update_num * l1_reg;
        if (diff <= 0)
          var(i, j) = 0;
        else {
          double temp = -sgn(_grad_sum(i, j)) * rate * diff /
                        sqrt(_del_grad(i, j));
          if (temp >= 0) var(i, j) = temp;
          else var(i, j) = 0;
        }
      }
    }
  }

  void WriteToFile(ofstream& out) {
    out << var.rows() << " " << var.cols() << " ";
    for (unsigned i = 0; i < var.rows(); ++i) {
      for (unsigned j = 0; j < var.cols(); ++j)
        out << var(i, j) << " ";
    }
    out << endl;
  }

  void ReadFromFile(ifstream& in) {
    string line;
    getline(in, line);
    vector<string> data = split_line(line, ' ');
    int rows = stoi(data[0]), cols = stoi(data[1]);
    var = T::Zero(rows, cols);
    for (unsigned i = 2; i < data.size(); ++i)
      var((i-2)/cols, (i-2)%cols) = stod(data[i]);
  }

 private:
  T _del_var, _del_grad, _grad_sum;  // update/gradient accumulators
  T _epsilon;
  int _update_num = 0;
};
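
/* Apart from logging strings, sparse.cc differs from sparse-nonneg.cc in
   Model::UpdateParams (the signed AdagradUpdateWithL1Reg is used here
   instead of the non-negative variant) and in the stopping criterion of
   Train (at most 50 iterations with an error-change threshold of 0.005,
   instead of 75 and 0.001). */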
/* Main class definition that learns the word vectors */
class Model {

 public:
  /* The parameters of the model */
  vector<Param<Col>> atom;
  Param<Mat> dict;
  int vec_len, factor;

  Model(const int& times, const int& vector_len, const int& vocab_len) {
    vec_len = vector_len;
    factor = times;
    dict.Init(vec_len, factor * vec_len);
    /* Params initialization */
    for (int i = 0; i < vocab_len; ++i) {
      Param<Col> vec;
      vec.Init(factor * vec_len, 1);
      atom.push_back(vec);
    }
  }

  /* Note: unused; ElemwiseHardTanh is not declared in utils.h, but the
     template is never instantiated. */
  template <typename T> void NonLinearity(T* vec) { ElemwiseHardTanh(vec); }

  void PredictVector(const Col& word_vec, const int& word_index,
                     Col* pred_vec) {
    *pred_vec = dict.var * atom[word_index].var;
  }

  void UpdateParams(const int& word_index, const double& rate,
                    const Col& diff_vec, const double& l1_reg,
                    const double& l2_reg) {
    Mat dict_grad = -2 * diff_vec * atom[word_index].var.transpose() +
                    2 * l2_reg * dict.var;
    dict.AdagradUpdate(rate, dict_grad);
    Col atom_elem_grad = -2 * dict.var.transpose() * diff_vec;
    atom[word_index].AdagradUpdateWithL1Reg(rate, atom_elem_grad, l1_reg);
  }

  void WriteVectorsToFile(const string& filename,
                          const mapUnsignedStr& vocab) {
    ofstream outfile(filename);
    if (outfile.is_open()) {
      outfile.precision(3);
      for (unsigned i = 0; i < atom.size(); ++i) {
        auto it = vocab.find(i);
        outfile << it->second << " ";
        for (unsigned j = 0; j < atom[i].var.rows(); ++j)
          outfile << atom[i].var[j] << " ";
        outfile << endl;
      }
      outfile.close();
      cerr << "\nWritten vectors to: " << filename;
    } else {
      cerr << "\nFailed to open " << filename;
    }
  }

  void WriteDictToFile(const string& filename) {
    ofstream outfile(filename);
    if (outfile.is_open()) {
      outfile.precision(3);
      dict.WriteToFile(outfile);
      outfile.close();
      cerr << "\nWritten dictionary to: " << filename;
    } else {
      cerr << "\nFailed to open " << filename;
    }
  }

};

void Train(const string& out_file, const int& factor,
           const int& cores, const double& l1_reg, const double& l2_reg,
           const vector<Col>& word_vecs, const mapUnsignedStr& vocab) {
  Model model(factor, word_vecs[0].size(), word_vecs.size());
  double avg_error = 1, prev_avg_err = 0;
  int iter = 0;
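  /* Run at least 20 iterations; after that, stop as soon as the average
     error is small enough, has stopped changing between iterations, or 50
     iterations are reached. */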
  while (iter < 20 || (avg_error > 0.05 && iter < 50 &&
                       abs(avg_error - prev_avg_err) > 0.005)) {
    iter += 1;
    cerr << "\nIteration: " << iter << endl;
    unsigned num_words = 0;
    double total_error = 0, atom_l1_norm = 0;
    /* Asynchronous (hogwild-style) updates: threads write to the shared
       parameters without locking; only the statistics are synchronized. */
    #pragma omp parallel num_threads(cores) shared(total_error, atom_l1_norm)
    #pragma omp for nowait
    for (int word_id = 0; word_id < (int) word_vecs.size(); ++word_id) {
      /* Predict the word vector and compute the reconstruction error */
      Col pred_vec;
      model.PredictVector(word_vecs[word_id], word_id, &pred_vec);
      Col diff_vec = word_vecs[word_id] - pred_vec;
      double error = diff_vec.squaredNorm();
      #pragma omp critical
      {
        total_error += error;
        num_words += 1;
        atom_l1_norm += model.atom[word_id].var.lpNorm<1>();
        cerr << num_words << "\r";
      }
      model.UpdateParams(word_id, RATE, diff_vec, l1_reg, l2_reg);
    }
    prev_avg_err = avg_error;
    avg_error = total_error / num_words;
    cerr << "\nError per example: " << avg_error;
    cerr << "\nDict L2 norm: " << model.dict.var.lpNorm<2>();
    cerr << "\nAvg Atom L1 norm: " << atom_l1_norm / num_words;
    //model.WriteVectorsToFile(out_file, vocab);
  }
  model.WriteVectorsToFile(out_file, vocab);
  model.WriteDictToFile(out_file + "_dict");
}

int main(int argc, char **argv) {
  mapUnsignedStr vocab;
  vector<Col> word_vecs;
  if (argc == 7) {
    string vec_corpus = argv[1];
    int factor = stoi(argv[2]);
    double l1_reg = stod(argv[3]), l2_reg = stod(argv[4]);
    int num_cores = stoi(argv[5]);
    string outfilename = argv[6];

    ReadVecsFromFile(vec_corpus, &vocab, &word_vecs);

    cerr << "Model specification" << endl;
    cerr << "----------------" << endl;
    cerr << "Input vector length: " << word_vecs[0].size() << endl;
    cerr << "Output vector length: " << factor * word_vecs[0].size() << endl;
    cerr << "L2 Reg (Dict): " << l2_reg << endl;
    cerr << "L1 Reg (Atom): " << l1_reg << endl;
    cerr << "Number of Cores: " << num_cores << endl;
    cerr << "----------------" << endl;

    Train(outfilename, factor, num_cores, l1_reg, l2_reg, word_vecs, vocab);
  } else {
    cerr << "Usage: " << argv[0] << " vec_corpus factor l1_reg l2_reg "
         << "num_cores outfilename\n";
    return 1;
  }
  return 0;
}
--------------------------------------------------------------------------------