├── .gitignore
├── .idea
├── .name
├── MLDeltaLearning.iml
├── encodings.xml
├── misc.xml
├── modules.xml
├── vcs.xml
└── workspace.xml
├── LICENSE
├── README.md
├── example.rb
├── ml_active_function.rb
├── ml_active_method.rb
└── ml_delta.rb
/.gitignore:
--------------------------------------------------------------------------------
1 | *.gem
2 | *.rbc
3 | /.config
4 | /coverage/
5 | /InstalledFiles
6 | /pkg/
7 | /spec/reports/
8 | /spec/examples.txt
9 | /test/tmp/
10 | /test/version_tmp/
11 | /tmp/
12 |
13 | ## Specific to RubyMotion:
14 | .dat*
15 | .repl_history
16 | build/
17 |
18 | ## Documentation cache and generated files:
19 | /.yardoc/
20 | /_yardoc/
21 | /doc/
22 | /rdoc/
23 |
24 | ## Environment normalization:
25 | /.bundle/
26 | /vendor/bundle
27 | /lib/bundler/man/
28 |
29 | # for a library or gem, you might want to ignore these files since the code is
30 | # intended to run in multiple environments; otherwise, check them in:
31 | # Gemfile.lock
32 | # .ruby-version
33 | # .ruby-gemset
34 |
35 | # unless supporting rvm < 1.11.0 or doing something fancy, ignore this:
36 | .rvmrc
37 |
--------------------------------------------------------------------------------
/.idea/.name:
--------------------------------------------------------------------------------
1 | MLDeltaLearning
--------------------------------------------------------------------------------
/.idea/MLDeltaLearning.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
56 |
57 |
58 |
59 |
60 | true
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 | 1454333352140
192 |
193 | 1454333352140
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
248 |
249 |
250 |
251 |
252 |
253 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016 Kuo-Ming Lin (ilovekalvar@gmail.com)
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
21 | ... bla bla ...
22 | Just up to you, ha.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## About
2 |
MLDelta is a Ruby implementation of the Delta Learning rule, a supervised machine-learning method that uses gradient descent to find the best solution.
4 |
5 | ## How To Get Started
6 |
7 | #### Require
8 | ``` ruby
9 | require './ml_delta'
10 | ```
11 |
12 | #### Normal Case
13 | ``` ruby
14 | delta = MLDelta.new
15 | delta.active_method = MLActiveMethod::TANH
16 | delta.learning_rate = 0.8
17 | delta.convergence_value = 0.001
18 | delta.max_iteration = 1000
19 | delta.add_patterns([1.0, -2.0, 0.0, -1.0], -1.0)
20 | delta.add_patterns([0.0, 1.5, -0.5, -1.0], 1.0)
21 | delta.setup_weights([1.0, -1.0, 0.0, 0.5])
22 |
23 | # Setting the block of per iteration training
24 | iteration_block = Proc.new do |iteration, weights|
25 | puts "iteration : #{iteration}, weights : #{weights}"
26 | end
27 |
28 | # Setting the block of completion when it finish training
29 | completion_block = Proc.new do |success, weights, total_iteration|
30 | puts "success : #{success}, weights : #{weights}, total_iteration : #{total_iteration}"
31 |
32 | # Verifying the pattern
  delta.direct_output_by_patterns([1.0, -2.0, 0.0, -1.0]) { |prediction|
    puts "prediction result is #{prediction}"
  }
36 | end
37 |
38 | # Start in training
39 | delta.training_with_iteration(iteration_block, completion_block)
40 | ```
41 |
42 | #### Lazy Case
1). If you wish to set up all the pattern weights automatically, you can use delta.random_weights() instead of delta.setup_weights().
44 | ``` ruby
45 | # The random scopes of weights between (min, max)
46 | delta.setup_random_scopes(-0.5, 0.5)
47 | delta.random_weights()
48 | ```
49 |
2). If you just want to see the result without per-iteration callbacks, you can directly use the method below :
51 |
52 | ``` ruby
53 | delta.training_with_completion {
54 | |success, weights, total_iteration|
55 | puts "success : #{success}, weights : #{weights}, total_iteration : #{total_iteration}"
56 | }
57 | ```
58 |
59 | ## Version
60 |
61 | V1.0.2
62 |
63 | ## LICENSE
64 |
65 | MIT.
66 |
67 |
--------------------------------------------------------------------------------
/example.rb:
--------------------------------------------------------------------------------
require './ml_delta'

# Example: trains a Delta-rule neuron on two 4-dimensional patterns
# and then verifies the first pattern against the trained weights.
delta = MLDelta.new
delta.active_method = MLActiveMethod::TANH
delta.learning_rate = 0.8
delta.convergence_value = 0.001
delta.max_iteration = 1000
delta.add_patterns([1.0, -2.0, 0.0, -1.0], -1.0)
delta.add_patterns([0.0, 1.5, -0.5, -1.0], 1.0)
delta.setup_weights([1.0, -1.0, 0.0, 0.5])
#delta.setup_random_scopes(-0.5, 0.5)
#delta.random_weights()

# Called once per training iteration.
iteration_block = Proc.new do |iteration, weights|
  puts "iteration : #{iteration}, weights : #{weights}"
end

# Called once when training finishes.
completion_block = Proc.new do |success, weights, total_iteration|
  puts "success : #{success}, weights : #{weights}, total_iteration : #{total_iteration}"
  # BUGFIX: the argument must be parenthesized — a `{ ... }` block after a
  # bare bracketed argument in a paren-less call is a Ruby SyntaxError.
  delta.direct_output_by_patterns([1.0, -2.0, 0.0, -1.0]) do |prediction|
    puts "prediction result is #{prediction}"
  end
end

delta.training_with_iteration(iteration_block, completion_block)

# delta.training_with_completion {
#   |success, weights, total_iteration|
#   puts "success : #{success}, weights : #{weights}, total_iteration : #{total_iteration}"
# }
31 |
--------------------------------------------------------------------------------
/ml_active_function.rb:
--------------------------------------------------------------------------------
# Activation Functions
2 |
# Activation functions (and their first derivatives) used by the Delta rule.
class MLActiveFunction
  # Bipolar sigmoid mapping x into (-1, 1).
  # NOTE(review): this equals tanh(x / 2), not Math.tanh(x) — kept as-is
  # because the paired derivative dash_tanh matches this exact form.
  def tanh(x)
    2.0 / (1.0 + Math.exp(-1.0 * x)) - 1.0
  end

  # Logistic sigmoid mapping x into (0, 1).
  def sigmoid(x)
    1.0 / (1.0 + Math.exp(-1.0 * x))
  end

  # Hard threshold: +1.0 for non-negative input, -1.0 otherwise.
  def sgn(x)
    return 1.0 if x >= 0.0
    -1.0
  end

  # Gaussian radial basis function with width parameter sigma.
  def rbf(x, sigma)
    Math.exp((-x) / (2.0 * sigma * sigma))
  end

  # Derivative of the bipolar sigmoid, expressed in terms of its output.
  def dash_tanh(output)
    0.5 * (1.0 - output * output)
  end

  # Derivative of the logistic sigmoid, expressed in terms of its output.
  def dash_sigmoid(output)
    output * (1.0 - output)
  end

  # sgn has no usable derivative; the value is passed through unchanged.
  def dash_sgn(output)
    output
  end

  # Derivative of the Gaussian RBF with respect to its input.
  def dash_rbf(output, sigma)
    width = 2.0 * sigma * sigma
    (-2.0 * output / width) * Math.exp(-output / width)
  end
end
36 |
--------------------------------------------------------------------------------
/ml_active_method.rb:
--------------------------------------------------------------------------------
# Enum-style constants selecting which MLActiveFunction activation
# (and matching derivative) the trainer applies to the net output.
module MLActiveMethod
  SGN = 0      # Hard threshold (signum)
  SIGMOID = 1  # Logistic sigmoid, output in (0, 1)
  TANH = 2     # Bipolar sigmoid, output in (-1, 1)
  RBF = 3      # Gaussian radial basis function
end
7 |
--------------------------------------------------------------------------------
/ml_delta.rb:
--------------------------------------------------------------------------------
1 | # Dynamic loading and matching the PATH of files
2 | $LOAD_PATH.unshift(File.dirname(__FILE__)) unless $LOAD_PATH.include?(File.dirname(__FILE__))
3 | #Dir["/path/*.rb"].each { |file| require file }
4 |
5 | require 'ml_active_function'
6 | require 'ml_active_method'
7 |
# Single-neuron Delta-rule (gradient descent) trainer.
#
# Usage: add training samples with #add_patterns, seed the weight vector with
# #setup_weights or #random_weights, then run #training_with_iteration or
# #training_with_completion. Prediction is done via #direct_output_by_patterns.
class MLDelta

  attr_accessor :learning_rate, :max_iteration, :convergence_value, :active_method, :random_scopes, :completion_block

  def initialize
    @active_function = MLActiveFunction.new
    @active_method = MLActiveMethod::TANH
    @iteration = 0
    @sum_error = 0.0
    @patterns = []
    @weights = []
    @targets = []
    @learning_rate = 0.5
    @max_iteration = 1
    @convergence_value = 0.001
    @random_scopes = [-0.5, 0.5]
    # Callbacks are optional; explicitly nil until a training_* method sets them.
    @iteration_block = nil
    @completion_block = nil
  end

  # Adds one training sample: an input vector and its expected target output.
  def add_patterns(inputs, target)
    @patterns << inputs
    @targets << target
  end

  # Replaces the current weight vector with a copy of +weights+.
  def setup_weights(weights)
    @weights.clear
    @weights += weights
  end

  # Sets the [min, max] range used by #random_weights.
  def setup_random_scopes(min, max)
    @random_scopes = [min, max]
  end

  # Randomizes one weight per input dimension, each drawn from the random
  # scopes scaled down by the input count.
  # Raises ArgumentError when no patterns have been added yet (the previous
  # version crashed with NoMethodError on nil in that case).
  def random_weights
    raise ArgumentError, 'add at least one pattern before calling random_weights' if @patterns.empty?

    @weights.clear
    # Follows the inputs count to decide how many weights it needs.
    net_count = @patterns.first.count
    max = random_scopes.last / net_count
    min = random_scopes.first / net_count

    net_count.times { @weights << rand(min..max) }
  end

  # Runs Delta-rule training until the iteration error reaches
  # @convergence_value or @max_iteration is hit.
  #
  # Fixes vs. the previous implementation:
  # * iterative loop instead of tail recursion — the recursive version could
  #   raise SystemStackError for large @max_iteration values;
  # * the completion block's +success+ flag now reports whether convergence
  #   was actually reached (previously it was always true, even when training
  #   stopped only because it exhausted @max_iteration).
  def training
    loop do
      @iteration += 1
      @sum_error = 0.0
      @patterns.each_with_index do |inputs, index|
        turning_weights_with_inputs(inputs, @targets[index])
      end

      converged = calculate_iteration_error <= @convergence_value
      if converged || @iteration >= @max_iteration
        @completion_block.call(converged, @weights, @iteration) unless @completion_block.nil?
        break
      end
      @iteration_block.call(@iteration, @weights) unless @iteration_block.nil?
    end
  end

  # Trains with only a completion callback: |success, weights, total_iteration|.
  def training_with_completion(&block)
    @completion_block = block
    training
  end

  # Trains with a per-iteration callback |iteration, weights| and a
  # completion callback |success, weights, total_iteration|.
  def training_with_iteration(iteration_block, completion_block)
    @iteration_block, @completion_block = iteration_block, completion_block
    training
  end

  # Feeds +inputs+ through the neuron and yields the activated output.
  def direct_output_by_patterns(inputs, &block)
    block.call(net_with_inputs(inputs)) if block_given?
  end

  private

  # Element-wise scalar multiplication of a vector.
  def multiply_matrix(matrix, number)
    matrix.map { |obj| obj * number }
  end

  # Element-wise addition of two equal-length vectors.
  def plus_matrix(matrix, another_matrix)
    matrix.collect.with_index { |obj, i| obj + another_matrix[i] }
  end

  # Applies the configured activation function to the raw net value.
  def activate_output_value(net_output)
    case @active_method
    when MLActiveMethod::SGN
      @active_function.sgn net_output
    when MLActiveMethod::SIGMOID
      @active_function.sigmoid net_output
    when MLActiveMethod::TANH
      @active_function.tanh net_output
    when MLActiveMethod::RBF
      # Sigma is fixed at 2.0, matching dash_of_net below.
      @active_function.rbf net_output, 2.0
    else
      # Unknown method: pass the raw net value through unchanged.
      net_output
    end
  end

  # Weighted sum of +inputs+ against @weights, passed through the activation.
  def net_with_inputs(inputs)
    sum = 0.0
    inputs.each.with_index { |obj, i| sum += (obj * @weights[i]) }
    activate_output_value sum
  end

  # Derivative of the configured activation, evaluated at the activated output.
  def dash_of_net(net_output)
    case @active_method
    when MLActiveMethod::SGN
      @active_function.dash_sgn net_output
    when MLActiveMethod::SIGMOID
      @active_function.dash_sigmoid net_output
    when MLActiveMethod::TANH
      @active_function.dash_tanh net_output
    when MLActiveMethod::RBF
      @active_function.dash_rbf net_output, 2.0
    else
      # Unknown method: identity, mirroring activate_output_value.
      net_output
    end
  end

  # Delta-defined cost function: half the mean of squared errors.
  def calculate_iteration_error
    (@sum_error / @patterns.count) * 0.5
  end

  # Accumulates one pattern's squared error into the iteration total.
  def sum_error(error_value)
    @sum_error += (error_value ** 2)
  end

  # Delta-rule weight update for a single pattern:
  # new weights = learning rate * (target - net output) * f'(net) * x + w
  def turning_weights_with_inputs(inputs, target_value)
    net_output = net_with_inputs(inputs)
    dash_output = dash_of_net(net_output)
    error_value = target_value - net_output

    sigma_value = @learning_rate * error_value * dash_output
    delta_weights = multiply_matrix(inputs, sigma_value)
    new_weights = plus_matrix(@weights, delta_weights)

    setup_weights(new_weights)
    sum_error(error_value)
  end
end
151 |
152 |
--------------------------------------------------------------------------------