├── LLCNN.png ├── LLCNN.prototxt ├── LLCNN_deploy.prototxt ├── LLCNN_solver.prototxt ├── README.md └── img ├── LLCNN.png ├── module.PNG └── network.PNG /LLCNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BestJuly/LLCNN/1ff3ff0bdb3738d60ceec6711e219e144d2cc556/LLCNN.png -------------------------------------------------------------------------------- /LLCNN.prototxt: -------------------------------------------------------------------------------- 1 | name: "LLCNN-18" 2 | layer { 3 | name: "data" 4 | type: "HDF5Data" 5 | top: "data" 6 | top: "label" 7 | hdf5_data_param { 8 | source: "examples/LLNet/1channel/train.txt" 9 | batch_size: 64 10 | } 11 | include: { phase: TRAIN } 12 | } 13 | layer { 14 | name: "data" 15 | type: "HDF5Data" 16 | top: "data" 17 | top: "label" 18 | hdf5_data_param { 19 | source: "examples/LLNet/1channel/test.txt" 20 | batch_size: 16 21 | } 22 | include: { phase: TEST } 23 | } 24 | 25 | layer { 26 | bottom: "data" 27 | top: "conv1" 28 | name: "conv1" 29 | type: "Convolution" 30 | convolution_param { 31 | num_output: 64 32 | kernel_size: 3 33 | pad: 1 34 | stride: 1 35 | weight_filler { 36 | type: "msra" 37 | } 38 | 39 | } 40 | } 41 | 42 | layer { 43 | bottom: "conv1" 44 | top: "conv1" 45 | name: "relu1" 46 | type: "ReLU" 47 | } 48 | 49 | layer { 50 | bottom: "conv1" 51 | top: "conv2" 52 | name: "conv2" 53 | type: "Convolution" 54 | convolution_param { 55 | num_output: 64 56 | kernel_size: 1 57 | pad: 0 58 | stride: 1 59 | weight_filler { 60 | type: "msra" 61 | } 62 | 63 | } 64 | } 65 | 66 | layer { 67 | bottom: "conv1" 68 | top: "conv3" 69 | name: "conv3" 70 | type: "Convolution" 71 | convolution_param { 72 | num_output: 64 73 | kernel_size: 3 74 | pad: 1 75 | stride: 1 76 | weight_filler { 77 | type: "msra" 78 | } 79 | 80 | } 81 | } 82 | 83 | layer { 84 | bottom: "conv3" 85 | top: "conv3" 86 | name: "relu3" 87 | type: "ReLU" 88 | } 89 | 90 | layer { 91 | bottom: "conv3" 
92 | top: "conv4" 93 | name: "conv4" 94 | type: "Convolution" 95 | convolution_param { 96 | num_output: 64 97 | kernel_size: 3 98 | pad: 1 99 | stride: 1 100 | weight_filler { 101 | type: "msra" 102 | } 103 | 104 | } 105 | } 106 | 107 | layer { 108 | bottom: "conv2" 109 | bottom: "conv4" 110 | top: "res2a" 111 | name: "res2a" 112 | type: "Eltwise" 113 | eltwise_param { 114 | operation: SUM 115 | } 116 | } 117 | 118 | layer { 119 | bottom: "res2a" 120 | top: "res2a" 121 | name: "res2a_relu" 122 | type: "ReLU" 123 | } 124 | 125 | layer { 126 | bottom: "res2a" 127 | top: "conv5" 128 | name: "conv5" 129 | type: "Convolution" 130 | convolution_param { 131 | num_output: 64 132 | kernel_size: 3 133 | pad: 1 134 | stride: 1 135 | weight_filler { 136 | type: "msra" 137 | } 138 | 139 | } 140 | } 141 | 142 | layer { 143 | bottom: "conv5" 144 | top: "conv5" 145 | name: "relu5" 146 | type: "ReLU" 147 | } 148 | 149 | layer { 150 | bottom: "conv5" 151 | top: "conv6" 152 | name: "conv6" 153 | type: "Convolution" 154 | convolution_param { 155 | num_output: 64 156 | kernel_size: 3 157 | pad: 1 158 | stride: 1 159 | weight_filler { 160 | type: "msra" 161 | } 162 | 163 | } 164 | } 165 | 166 | layer { 167 | bottom: "res2a" 168 | bottom: "conv6" 169 | top: "res2b" 170 | name: "res2b" 171 | type: "Eltwise" 172 | eltwise_param { 173 | operation: SUM 174 | } 175 | } 176 | 177 | layer { 178 | bottom: "res2b" 179 | top: "res2b" 180 | name: "res2b_relu" 181 | type: "ReLU" 182 | } 183 | 184 | layer { 185 | bottom: "res2b" 186 | top: "conv7" 187 | name: "conv7" 188 | type: "Convolution" 189 | convolution_param { 190 | num_output: 64 191 | kernel_size: 1 192 | pad: 0 193 | stride: 1 194 | weight_filler { 195 | type: "msra" 196 | } 197 | 198 | 199 | } 200 | } 201 | 202 | layer { 203 | bottom: "res2b" 204 | top: "conv8" 205 | name: "conv8" 206 | type: "Convolution" 207 | convolution_param { 208 | num_output: 64 209 | kernel_size: 3 210 | pad: 1 211 | stride: 1 212 | weight_filler { 213 | type: 
"msra" 214 | } 215 | 216 | } 217 | } 218 | 219 | layer { 220 | bottom: "conv8" 221 | top: "conv8" 222 | name: "conv8_relu" 223 | type: "ReLU" 224 | } 225 | 226 | layer { 227 | bottom: "conv8" 228 | top: "conv9" 229 | name: "conv9" 230 | type: "Convolution" 231 | convolution_param { 232 | num_output: 64 233 | kernel_size: 3 234 | pad: 1 235 | stride: 1 236 | weight_filler { 237 | type: "msra" 238 | } 239 | 240 | } 241 | } 242 | 243 | layer { 244 | bottom: "conv7" 245 | bottom: "conv9" 246 | top: "res3a" 247 | name: "res3a" 248 | type: "Eltwise" 249 | eltwise_param { 250 | operation: SUM 251 | } 252 | } 253 | 254 | layer { 255 | bottom: "res3a" 256 | top: "res3a" 257 | name: "res3a_relu" 258 | type: "ReLU" 259 | } 260 | 261 | layer { 262 | bottom: "res3a" 263 | top: "conv10" 264 | name: "conv10" 265 | type: "Convolution" 266 | convolution_param { 267 | num_output: 64 268 | kernel_size: 3 269 | pad: 1 270 | stride: 1 271 | weight_filler { 272 | type: "msra" 273 | } 274 | 275 | 276 | } 277 | } 278 | 279 | layer { 280 | bottom: "conv10" 281 | top: "conv10" 282 | name: "relu10" 283 | type: "ReLU" 284 | } 285 | 286 | layer { 287 | bottom: "conv10" 288 | top: "conv11" 289 | name: "conv11" 290 | type: "Convolution" 291 | convolution_param { 292 | num_output: 64 293 | kernel_size: 3 294 | pad: 1 295 | stride: 1 296 | weight_filler { 297 | type: "msra" 298 | } 299 | 300 | } 301 | } 302 | 303 | layer { 304 | bottom: "res3a" 305 | bottom: "conv11" 306 | top: "res3b" 307 | name: "res3b" 308 | type: "Eltwise" 309 | eltwise_param { 310 | operation: SUM 311 | } 312 | } 313 | 314 | layer { 315 | bottom: "res3b" 316 | top: "res3b" 317 | name: "res3b_relu" 318 | type: "ReLU" 319 | } 320 | 321 | layer { 322 | bottom: "res3b" 323 | top: "conv12" 324 | name: "conv12" 325 | type: "Convolution" 326 | convolution_param { 327 | num_output: 64 328 | kernel_size: 1 329 | pad: 0 330 | stride: 1 331 | weight_filler { 332 | type: "msra" 333 | } 334 | 335 | 336 | } 337 | } 338 | 339 | layer { 340 
| bottom: "res3b" 341 | top: "conv13" 342 | name: "conv13" 343 | type: "Convolution" 344 | convolution_param { 345 | num_output: 64 346 | kernel_size: 3 347 | pad: 1 348 | stride: 1 349 | weight_filler { 350 | type: "msra" 351 | } 352 | 353 | } 354 | } 355 | 356 | layer { 357 | bottom: "conv13" 358 | top: "conv13" 359 | name: "conv13_relu" 360 | type: "ReLU" 361 | } 362 | 363 | layer { 364 | bottom: "conv13" 365 | top: "conv14" 366 | name: "conv14" 367 | type: "Convolution" 368 | convolution_param { 369 | num_output: 64 370 | kernel_size: 3 371 | pad: 1 372 | stride: 1 373 | weight_filler { 374 | type: "msra" 375 | } 376 | 377 | } 378 | } 379 | 380 | layer { 381 | bottom: "conv12" 382 | bottom: "conv14" 383 | top: "res4a" 384 | name: "res4a" 385 | type: "Eltwise" 386 | eltwise_param { 387 | operation: SUM 388 | } 389 | } 390 | 391 | layer { 392 | bottom: "res4a" 393 | top: "res4a" 394 | name: "res4a_relu" 395 | type: "ReLU" 396 | } 397 | 398 | layer { 399 | bottom: "res4a" 400 | top: "conv15" 401 | name: "conv15" 402 | type: "Convolution" 403 | convolution_param { 404 | num_output: 64 405 | kernel_size: 3 406 | pad: 1 407 | stride: 1 408 | weight_filler { 409 | type: "msra" 410 | } 411 | 412 | 413 | } 414 | } 415 | 416 | layer { 417 | bottom: "conv15" 418 | top: "conv15" 419 | name: "relu15" 420 | type: "ReLU" 421 | } 422 | 423 | layer { 424 | bottom: "conv15" 425 | top: "conv16" 426 | name: "conv16" 427 | type: "Convolution" 428 | convolution_param { 429 | num_output: 64 430 | kernel_size: 3 431 | pad: 1 432 | stride: 1 433 | weight_filler { 434 | type: "msra" 435 | } 436 | 437 | } 438 | } 439 | 440 | layer { 441 | bottom: "res4a" 442 | bottom: "conv16" 443 | top: "res4b" 444 | name: "res4b" 445 | type: "Eltwise" 446 | eltwise_param { 447 | operation: SUM 448 | } 449 | } 450 | 451 | layer { 452 | bottom: "res4b" 453 | top: "res4b" 454 | name: "res4b_relu" 455 | type: "ReLU" 456 | } 457 | 458 | layer { 459 | name: "conv17" 460 | type: "Convolution" 461 | bottom: 
"res4b" 462 | top: "conv17" 463 | param { 464 | lr_mult: 1 465 | } 466 | param { 467 | lr_mult: 0.1 468 | } 469 | convolution_param { 470 | num_output: 1 471 | kernel_size: 3 472 | stride: 1 473 | pad: 1 474 | weight_filler { 475 | type: "msra" 476 | } 477 | 478 | } 479 | } 480 | 481 | layer { 482 | name: "ssimloss" 483 | type: "SSIMLoss" 484 | bottom: "conv17" 485 | bottom: "label" 486 | top: "loss" 487 | loss_weight: 1 # <- set whatever you fancy 488 | ssim_loss_param{ 489 | kernel_size: 8 # <- The kernel size is linked to the gaussian variance (circular). The kernel encloses +/1 3*sigma 490 | stride: 8 # <- Equal strides in both dimensions 491 | c1: 0.0001 # <- Let these be 492 | c2: 0.001 # <- Let these be 493 | } 494 | } 495 | 496 | -------------------------------------------------------------------------------- /LLCNN_deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "LLCNN-18" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 1 5 | input_dim: 256 6 | input_dim: 256 7 | 8 | layer { 9 | bottom: "data" 10 | top: "conv1" 11 | name: "conv1" 12 | type: "Convolution" 13 | convolution_param { 14 | num_output: 64 15 | kernel_size: 3 16 | pad: 1 17 | stride: 1 18 | weight_filler { 19 | type: "msra" 20 | } 21 | 22 | } 23 | } 24 | 25 | layer { 26 | bottom: "conv1" 27 | top: "conv1" 28 | name: "relu1" 29 | type: "ReLU" 30 | } 31 | 32 | layer { 33 | bottom: "conv1" 34 | top: "conv2" 35 | name: "conv2" 36 | type: "Convolution" 37 | convolution_param { 38 | num_output: 64 39 | kernel_size: 1 40 | pad: 0 41 | stride: 1 42 | weight_filler { 43 | type: "msra" 44 | } 45 | 46 | } 47 | } 48 | 49 | layer { 50 | bottom: "conv1" 51 | top: "conv3" 52 | name: "conv3" 53 | type: "Convolution" 54 | convolution_param { 55 | num_output: 64 56 | kernel_size: 3 57 | pad: 1 58 | stride: 1 59 | weight_filler { 60 | type: "msra" 61 | } 62 | 63 | } 64 | } 65 | 66 | layer { 67 | bottom: "conv3" 68 | top: "conv3" 69 | name: "relu3" 70 | 
type: "ReLU" 71 | } 72 | 73 | layer { 74 | bottom: "conv3" 75 | top: "conv4" 76 | name: "conv4" 77 | type: "Convolution" 78 | convolution_param { 79 | num_output: 64 80 | kernel_size: 3 81 | pad: 1 82 | stride: 1 83 | weight_filler { 84 | type: "msra" 85 | } 86 | 87 | } 88 | } 89 | 90 | layer { 91 | bottom: "conv2" 92 | bottom: "conv4" 93 | top: "res2a" 94 | name: "res2a" 95 | type: "Eltwise" 96 | eltwise_param { 97 | operation: SUM 98 | } 99 | } 100 | 101 | layer { 102 | bottom: "res2a" 103 | top: "res2a" 104 | name: "res2a_relu" 105 | type: "ReLU" 106 | } 107 | 108 | layer { 109 | bottom: "res2a" 110 | top: "conv5" 111 | name: "conv5" 112 | type: "Convolution" 113 | convolution_param { 114 | num_output: 64 115 | kernel_size: 3 116 | pad: 1 117 | stride: 1 118 | weight_filler { 119 | type: "msra" 120 | } 121 | 122 | } 123 | } 124 | 125 | layer { 126 | bottom: "conv5" 127 | top: "conv5" 128 | name: "relu5" 129 | type: "ReLU" 130 | } 131 | 132 | layer { 133 | bottom: "conv5" 134 | top: "conv6" 135 | name: "conv6" 136 | type: "Convolution" 137 | convolution_param { 138 | num_output: 64 139 | kernel_size: 3 140 | pad: 1 141 | stride: 1 142 | weight_filler { 143 | type: "msra" 144 | } 145 | 146 | } 147 | } 148 | 149 | layer { 150 | bottom: "res2a" 151 | bottom: "conv6" 152 | top: "res2b" 153 | name: "res2b" 154 | type: "Eltwise" 155 | eltwise_param { 156 | operation: SUM 157 | } 158 | } 159 | 160 | layer { 161 | bottom: "res2b" 162 | top: "res2b" 163 | name: "res2b_relu" 164 | type: "ReLU" 165 | } 166 | 167 | layer { 168 | bottom: "res2b" 169 | top: "conv7" 170 | name: "conv7" 171 | type: "Convolution" 172 | convolution_param { 173 | num_output: 64 174 | kernel_size: 1 175 | pad: 0 176 | stride: 1 177 | weight_filler { 178 | type: "msra" 179 | } 180 | 181 | 182 | } 183 | } 184 | 185 | layer { 186 | bottom: "res2b" 187 | top: "conv8" 188 | name: "conv8" 189 | type: "Convolution" 190 | convolution_param { 191 | num_output: 64 192 | kernel_size: 3 193 | pad: 1 194 | 
stride: 1 195 | weight_filler { 196 | type: "msra" 197 | } 198 | 199 | } 200 | } 201 | 202 | layer { 203 | bottom: "conv8" 204 | top: "conv8" 205 | name: "conv8_relu" 206 | type: "ReLU" 207 | } 208 | 209 | layer { 210 | bottom: "conv8" 211 | top: "conv9" 212 | name: "conv9" 213 | type: "Convolution" 214 | convolution_param { 215 | num_output: 64 216 | kernel_size: 3 217 | pad: 1 218 | stride: 1 219 | weight_filler { 220 | type: "msra" 221 | } 222 | 223 | } 224 | } 225 | 226 | layer { 227 | bottom: "conv7" 228 | bottom: "conv9" 229 | top: "res3a" 230 | name: "res3a" 231 | type: "Eltwise" 232 | eltwise_param { 233 | operation: SUM 234 | } 235 | } 236 | 237 | layer { 238 | bottom: "res3a" 239 | top: "res3a" 240 | name: "res3a_relu" 241 | type: "ReLU" 242 | } 243 | 244 | layer { 245 | bottom: "res3a" 246 | top: "conv10" 247 | name: "conv10" 248 | type: "Convolution" 249 | convolution_param { 250 | num_output: 64 251 | kernel_size: 3 252 | pad: 1 253 | stride: 1 254 | weight_filler { 255 | type: "msra" 256 | } 257 | 258 | 259 | } 260 | } 261 | 262 | layer { 263 | bottom: "conv10" 264 | top: "conv10" 265 | name: "relu10" 266 | type: "ReLU" 267 | } 268 | 269 | layer { 270 | bottom: "conv10" 271 | top: "conv11" 272 | name: "conv11" 273 | type: "Convolution" 274 | convolution_param { 275 | num_output: 64 276 | kernel_size: 3 277 | pad: 1 278 | stride: 1 279 | weight_filler { 280 | type: "msra" 281 | } 282 | 283 | } 284 | } 285 | 286 | layer { 287 | bottom: "res3a" 288 | bottom: "conv11" 289 | top: "res3b" 290 | name: "res3b" 291 | type: "Eltwise" 292 | eltwise_param { 293 | operation: SUM 294 | } 295 | } 296 | 297 | layer { 298 | bottom: "res3b" 299 | top: "res3b" 300 | name: "res3b_relu" 301 | type: "ReLU" 302 | } 303 | 304 | layer { 305 | bottom: "res3b" 306 | top: "conv12" 307 | name: "conv12" 308 | type: "Convolution" 309 | convolution_param { 310 | num_output: 64 311 | kernel_size: 1 312 | pad: 0 313 | stride: 1 314 | weight_filler { 315 | type: "msra" 316 | } 317 | 
318 | 319 | } 320 | } 321 | 322 | layer { 323 | bottom: "res3b" 324 | top: "conv13" 325 | name: "conv13" 326 | type: "Convolution" 327 | convolution_param { 328 | num_output: 64 329 | kernel_size: 3 330 | pad: 1 331 | stride: 1 332 | weight_filler { 333 | type: "msra" 334 | } 335 | 336 | } 337 | } 338 | 339 | layer { 340 | bottom: "conv13" 341 | top: "conv13" 342 | name: "conv13_relu" 343 | type: "ReLU" 344 | } 345 | 346 | layer { 347 | bottom: "conv13" 348 | top: "conv14" 349 | name: "conv14" 350 | type: "Convolution" 351 | convolution_param { 352 | num_output: 64 353 | kernel_size: 3 354 | pad: 1 355 | stride: 1 356 | weight_filler { 357 | type: "msra" 358 | } 359 | 360 | } 361 | } 362 | 363 | layer { 364 | bottom: "conv12" 365 | bottom: "conv14" 366 | top: "res4a" 367 | name: "res4a" 368 | type: "Eltwise" 369 | eltwise_param { 370 | operation: SUM 371 | } 372 | } 373 | 374 | layer { 375 | bottom: "res4a" 376 | top: "res4a" 377 | name: "res4a_relu" 378 | type: "ReLU" 379 | } 380 | 381 | layer { 382 | bottom: "res4a" 383 | top: "conv15" 384 | name: "conv15" 385 | type: "Convolution" 386 | convolution_param { 387 | num_output: 64 388 | kernel_size: 3 389 | pad: 1 390 | stride: 1 391 | weight_filler { 392 | type: "msra" 393 | } 394 | 395 | 396 | } 397 | } 398 | 399 | layer { 400 | bottom: "conv15" 401 | top: "conv15" 402 | name: "relu15" 403 | type: "ReLU" 404 | } 405 | 406 | layer { 407 | bottom: "conv15" 408 | top: "conv16" 409 | name: "conv16" 410 | type: "Convolution" 411 | convolution_param { 412 | num_output: 64 413 | kernel_size: 3 414 | pad: 1 415 | stride: 1 416 | weight_filler { 417 | type: "msra" 418 | } 419 | 420 | } 421 | } 422 | 423 | layer { 424 | bottom: "res4a" 425 | bottom: "conv16" 426 | top: "res4b" 427 | name: "res4b" 428 | type: "Eltwise" 429 | eltwise_param { 430 | operation: SUM 431 | } 432 | } 433 | 434 | layer { 435 | bottom: "res4b" 436 | top: "res4b" 437 | name: "res4b_relu" 438 | type: "ReLU" 439 | } 440 | 441 | layer { 442 | name: 
"conv17" 443 | type: "Convolution" 444 | bottom: "res4b" 445 | top: "conv17" 446 | param { 447 | lr_mult: 1 448 | } 449 | param { 450 | lr_mult: 0.1 451 | } 452 | convolution_param { 453 | num_output: 1 454 | kernel_size: 3 455 | stride: 1 456 | pad: 1 457 | weight_filler { 458 | type: "msra" 459 | } 460 | 461 | } 462 | } 463 | 464 | 465 | -------------------------------------------------------------------------------- /LLCNN_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/LLNet/resnet/ssim/resnet18.prototxt" 3 | test_iter: 556 4 | # Carry out testing every 500 training iterations. 5 | test_interval: 500 6 | # The base learning rate, momentum and the weight decay of the network. 7 | base_lr: 0.01 8 | momentum: 0.9 9 | weight_decay: 0.0001 10 | clip_gradients: 0.1 11 | # The learning rate policy 12 | lr_policy: "step" 13 | gamma: 0.1 14 | stepsize: 500000 15 | # Display every 100 iterations 16 | display: 100 17 | # The maximum number of iterations 18 | max_iter: 800000 19 | # snapshot intermediate results 20 | snapshot: 40000 21 | snapshot_prefix: "examples/LLNet/resnet/ssim/caffemodel/ssim" 22 | # solver mode: CPU or GPU 23 | solver_mode: GPU 24 | 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LLCNN 2 | This is the training code for [LLCNN: A convolutional neural network for low-light image enhancement](https://ieeexplore.ieee.org/abstract/document/8305143). 3 | The code can be used in caffe. The official version of caffe may not support SSIM loss function. You need to add SSIM loss layer or just use [this](https://github.com/onalbach/caffe-deep-shading). 
4 | 5 | ## Network Architecture 6 | We use one convolutional layer to do pre-processing and another convolutional layer to fuse feature maps and generate the output image. Several special-designed convolutional modules are placed between these two convolutional layers. 7 | ![image](./img/network.PNG) 8 | ### Special designed module 9 | These special-designed convolutional modules are inspired by residual learning. The architecture is shown as follows. 10 | ![image](./img/module.PNG) 11 | 12 | ## Data 13 | The training images are the same as VDSR, which can be downloaded using this [URL](http://cv.snu.ac.kr/research/VDSR/train_data.zip). Other image processing datasets can be used to generate training and testing data. Here are some datasets usually used in the image processing community: [Set 5](https://uofi.box.com/shared/static/kfahv87nfe8ax910l85dksyl2q212voc.zip)/ 14 | [Set 14](https://uofi.box.com/shared/static/igsnfieh4lz68l926l8xbklwsnnk8we9.zip)/ 15 | [Urban100](https://uofi.box.com/shared/static/65upg43jjd0a4cwsiqgl6o6ixube6klm.zip)/ 16 | [BSD100](https://uofi.box.com/shared/static/qgctsplb8txrksm9to9x01zfa4m61ngq.zip). 17 | For preparing training data, you can use the code from [VDSR](https://github.com/huangzehao/caffe-vdsr) or [SRCNN](http://mmlab.ie.cuhk.edu.hk/projects/SRCNN.html) and add nonlinear algorithms to simulate low-light conditions. 18 | 19 | ## Citing LLCNN 20 | If you find LLCNN useful in your research, please consider citing: 21 | ``` 22 | @INPROCEEDINGS{8305143, 23 | author={L. Tao and C. Zhu and G. Xiang and Y. Li and H. Jia and X. 
Xie}, 24 | booktitle={2017 IEEE Visual Communications and Image Processing (VCIP)}, 25 | title={LLCNN: A convolutional neural network for low-light image enhancement}, 26 | year={2017}} 27 | ``` -------------------------------------------------------------------------------- /img/LLCNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BestJuly/LLCNN/1ff3ff0bdb3738d60ceec6711e219e144d2cc556/img/LLCNN.png -------------------------------------------------------------------------------- /img/module.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BestJuly/LLCNN/1ff3ff0bdb3738d60ceec6711e219e144d2cc556/img/module.PNG -------------------------------------------------------------------------------- /img/network.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BestJuly/LLCNN/1ff3ff0bdb3738d60ceec6711e219e144d2cc556/img/network.PNG --------------------------------------------------------------------------------