from setuptools import setup, find_packages

setup(
    name="densenet",
    version="0.5",
    description="An implementation of DenseNet for 1D inputs in Keras.",
    url="https://github.com/ankitvgupta/densenet_1d",
    author="Ankit Gupta",
    author_email="ankitgupta@alumni.harvard.edu",
    license="MIT",
    packages=find_packages(),
    zip_safe=False,
    # keras is intentionally not declared as a hard dependency; users are
    # expected to have their own Keras installation.
    # install_requires=['keras'],
)
obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # densenet_1d 2 | 3 | This repository contains a Keras implementation of the DenseNet paper (Huang et al, "Densely Connected Convolutional Networks", CVPR 2017). This implementation will focus on use-cases where the inputs are 1D sequences. 4 | 5 | # Setup 6 | To install densenet, simply clone this repository, and run 7 | ``` 8 | python setup.py install 9 | ``` 10 | 11 | # Usage 12 | 13 | The classifiers directory contains classifiers implemented as subclasses of `keras.models.Model` classes. This means that once a `densenet.classifier` is instantiated, it contains all of the usual methods of `keras.models.Model`, such as `fit`, `predict`, `evaluate`, `summary`, etc. 14 | 15 | Here is an instantiation of the model that matches the original Huang et al. 
"""
blocks/one_d.py
Author: Ankit Gupta

Implementations of various DenseNet blocks for 1D sequences

This module contains helper functions that define the various subcomponents
of a DenseNet. This includes dense blocks and transition blocks.
"""

from keras.layers import BatchNormalization, Activation, Conv1D, Concatenate, AveragePooling1D


def H_l(k, bottleneck_size, kernel_width):
    """
    A single convolutional "layer" as defined by Huang et al.
    Defined as H_l in the original paper.

    :param k: int representing the "growth rate" of the DenseNet
    :param bottleneck_size: int representing the size of the bottleneck, as a multiple of k. Set to 0 for no bottleneck.
    :param kernel_width: int representing the width of the main convolutional kernel
    :return a function wrapping the keras layers for H_l
    """
    use_bottleneck = bottleneck_size > 0
    num_bottleneck_output_filters = k * bottleneck_size

    def f(x):
        if use_bottleneck:
            # DenseNet-B: a 1x1 "bottleneck" convolution reduces the number
            # of feature maps before the wider (and more expensive) conv.
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
            x = Conv1D(
                num_bottleneck_output_filters,
                1,
                strides=1,
                padding="same",
                dilation_rate=1)(x)
        # Pre-activation ordering (BN -> ReLU -> conv), as in the paper.
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv1D(
            k,
            kernel_width,
            strides=1,
            padding="same",
            dilation_rate=1)(x)
        return x
    return f


def dense_block(k, num_layers, kernel_width, bottleneck_size):
    """
    A single dense block of the DenseNet

    :param k: int representing the "growth rate" of the DenseNet
    :param num_layers: int representing the number of layers in the block
    :param kernel_width: int representing the width of the main convolutional kernel
    :param bottleneck_size: int representing the size of the bottleneck, as a multiple of k. Set to 0 for no bottleneck.
    :return a function wrapping the entire dense block
    """
    def f(x):
        # Dense connectivity (Huang et al., Eq. 2): each layer's input is the
        # concatenation of ALL preceding feature maps in the block.
        # BUG FIX: the previous implementation fed each H_l only the output of
        # the layer directly before it and concatenated once at the very end,
        # which is a plain chain of convolutions, not a dense block. The final
        # channel count (input + k * num_layers) is unchanged by this fix.
        for _ in range(num_layers):
            layer_output = H_l(k, bottleneck_size, kernel_width)(x)
            x = Concatenate(axis=-1)([x, layer_output])
        return x
    return f


def transition_block(pool_size=2, stride=2, theta=0.5):
    """
    A single transition block of the DenseNet

    :param pool_size: int representing the width of the average pool
    :param stride: int representing the stride of the average pool
    :param theta: float representing the amount of compression in the 1x1 convolution. Set to 1 for no compression.
    :return a function wrapping the entire transition block
    """
    assert theta > 0 and theta <= 1

    def f(x):
        # DenseNet-C: compress the channel count by a factor of theta with a
        # 1x1 convolution, then halve the sequence length by average pooling.
        # x.shape[2] is the channel axis of a (batch, steps, channels) tensor.
        num_transition_output_filters = int(int(x.shape[2]) * float(theta))
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv1D(
            num_transition_output_filters,
            1,
            strides=1,
            padding="same",
            dilation_rate=1)(x)
        x = AveragePooling1D(
            pool_size=pool_size,
            strides=stride,
            padding="same")(x)
        return x
    return f
"""
models/one_d.py
Author: Ankit Gupta

Implementations of the core DenseNet model

This module contains helper functions that define a DenseNet computational
graph in Keras. Note that these functions are not immediately usable for
classification, as the outputs are not softmaxed, and the functions have not
been wrapped in keras.models.Model objects.
"""
from keras.layers import Conv1D, BatchNormalization, Activation, MaxPooling1D, GlobalAveragePooling1D

from densenet.blocks.one_d import dense_block, transition_block


def DenseNet(
        k,
        block_sizes,
        conv_kernel_width,
        bottleneck_size,
        transition_pool_size,
        transition_pool_stride,
        theta,
        initial_conv_width,
        initial_stride,
        initial_filters,
        initial_pool_width,
        initial_pool_stride,
        use_global_pooling):
    """
    Define a DenseNet computational graph for 1D inputs.

    :param k: int, the "growth rate" of the DenseNet
    :param block_sizes: list of ints, the number of layers in each dense block
    :param conv_kernel_width: int, kernel width of each convolution in the dense blocks
    :param bottleneck_size: int, size of the bottleneck as a multiple of k (0 for no bottleneck)
    :param transition_pool_size: int, pool_size of each transition block
    :param transition_pool_stride: int, pooling stride of each transition block
    :param theta: float, compression factor of each transition block (1 for no compression)
    :param initial_conv_width: int, kernel width of the stem convolution
    :param initial_stride: int, stride of the stem convolution
    :param initial_filters: int, number of filters of the stem convolution
    :param initial_pool_width: int, pool_size of the stem max pool
    :param initial_pool_stride: int, stride of the stem max pool
    :param use_global_pooling: bool, whether to end with global average pooling
    :return: a function mapping an input tensor to unsoftmaxed DenseNet features
    """
    def f(x):
        # Stem: one convolution + max pool before any dense block.
        x = Conv1D(
            initial_filters,
            initial_conv_width,
            strides=initial_stride,
            padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = MaxPooling1D(
            pool_size=initial_pool_width,
            strides=initial_pool_stride,
            padding="same")(x)

        # Every dense block except the last is followed by a transition block.
        for block_size in block_sizes[:-1]:
            x = dense_block(
                k,
                block_size,
                conv_kernel_width,
                bottleneck_size)(x)
            x = transition_block(
                pool_size=transition_pool_size,
                stride=transition_pool_stride,
                theta=theta)(x)

        # The last dense block has no transition block after it.
        x = dense_block(
            k,
            block_sizes[-1],
            conv_kernel_width,
            bottleneck_size)(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        if use_global_pooling:
            x = GlobalAveragePooling1D()(x)
        return x
    return f


# The four named variants below differ only in their block sizes (Huang et
# al., Table 1). Each forwards every other argument to DenseNet unchanged,
# so both positional and keyword callers keep working.

def DenseNet121(k, *args, **kwargs):
    """DenseNet-121: block sizes [6, 12, 24, 16].

    Accepts the same arguments as DenseNet, except block_sizes.
    """
    return DenseNet(k, [6, 12, 24, 16], *args, **kwargs)


def DenseNet169(k, *args, **kwargs):
    """DenseNet-169: block sizes [6, 12, 32, 32].

    Accepts the same arguments as DenseNet, except block_sizes.
    """
    return DenseNet(k, [6, 12, 32, 32], *args, **kwargs)


def DenseNet201(k, *args, **kwargs):
    """DenseNet-201: block sizes [6, 12, 48, 32].

    Accepts the same arguments as DenseNet, except block_sizes.
    """
    return DenseNet(k, [6, 12, 48, 32], *args, **kwargs)


def DenseNet264(k, *args, **kwargs):
    """DenseNet-264: block sizes [6, 12, 64, 48].

    Accepts the same arguments as DenseNet, except block_sizes.
    """
    return DenseNet(k, [6, 12, 64, 48], *args, **kwargs)
"""
classifiers/one_d.py
Author: Ankit Gupta

Implementations of classifiers using DenseNet

This module contains subclasses of keras.models.Model that implement DenseNet
through the Keras API. This works by calling the functions in
densenet.models.one_d.py, adding the appropriate classification Keras
functions, and wrapping the transformation in keras.models.Model objects.
"""

import keras.models
from keras.layers import Input, Dense
import densenet.models.one_d


def _classifier_tensors(model_fn, model_args, input_shape, num_outputs):
    """
    Build the (input, output) tensor pair shared by every classifier below.

    :param model_fn: a graph builder from densenet.models.one_d (e.g. DenseNet121)
    :param model_args: tuple of positional arguments forwarded to model_fn
    :param input_shape: shape of the inputs without the batch dimension
    :param num_outputs: the number of classes to predict
    :return: (model_input, output) tensors suitable for keras.models.Model
    """
    model_input = Input(shape=input_shape)
    # Global pooling is always enabled so the Dense head sees a flat vector.
    features = model_fn(*model_args, use_global_pooling=True)(model_input)
    output = Dense(num_outputs, activation="softmax")(features)
    return model_input, output


class DenseNet121(keras.models.Model):
    """
    Create a Keras Model Object that is an implementation of DenseNet121
    :param input_shape: The shape of the inputs without the batch dimension. This should be a valid 1D sequence, such as (244, 25).
    :param num_outputs: the number of classes to predict
    :param k: The "growth rate" of the DenseNet model
    :param conv_kernel_width: The kernel width of each convolution in the dense blocks.
    :param bottleneck_size: The size of the bottleneck, as a multiple of k. Set to 0 for no bottleneck.
    :param transition_pool_size: pool_size in the transition layer
    :param transition_pool_stride: pooling stride in the transition layer
    :param theta: Amount of compression in the transition layer. Set to 1 for no compression.
    :param initial_conv_width: Kernel width for the one convolution before the dense blocks
    :param initial_stride: Stride for the one convolution before the dense blocks
    :param initial_filters: Number of filters for the one convolution before the dense blocks
    :param initial_pool_width: pool_size for the one pooling before the dense blocks
    :param initial_pool_stride: stride for the one pooling before the dense blocks
    """
    def __init__(
            self,
            input_shape,
            num_outputs=1000,
            k=32,
            conv_kernel_width=3,
            bottleneck_size=4,
            transition_pool_size=2,
            transition_pool_stride=2,
            theta=0.5,
            initial_conv_width=7,
            initial_stride=2,
            initial_filters=64,
            initial_pool_width=3,
            initial_pool_stride=2):
        model_input, output = _classifier_tensors(
            densenet.models.one_d.DenseNet121,
            (k, conv_kernel_width, bottleneck_size, transition_pool_size,
             transition_pool_stride, theta, initial_conv_width, initial_stride,
             initial_filters, initial_pool_width, initial_pool_stride),
            input_shape,
            num_outputs)
        super(DenseNet121, self).__init__(model_input, output)


class DenseNet169(keras.models.Model):
    """
    Create a Keras Model Object that is an implementation of DenseNet169.

    Parameters are identical to DenseNet121.
    """
    def __init__(
            self,
            input_shape,
            num_outputs=1000,
            k=32,
            conv_kernel_width=3,
            bottleneck_size=4,
            transition_pool_size=2,
            transition_pool_stride=2,
            theta=0.5,
            initial_conv_width=7,
            initial_stride=2,
            initial_filters=64,
            initial_pool_width=3,
            initial_pool_stride=2):
        model_input, output = _classifier_tensors(
            densenet.models.one_d.DenseNet169,
            (k, conv_kernel_width, bottleneck_size, transition_pool_size,
             transition_pool_stride, theta, initial_conv_width, initial_stride,
             initial_filters, initial_pool_width, initial_pool_stride),
            input_shape,
            num_outputs)
        super(DenseNet169, self).__init__(model_input, output)


class DenseNet201(keras.models.Model):
    """
    Create a Keras Model Object that is an implementation of DenseNet201.

    Parameters are identical to DenseNet121.
    """
    def __init__(
            self,
            input_shape,
            num_outputs=1000,
            k=32,
            conv_kernel_width=3,
            bottleneck_size=4,
            transition_pool_size=2,
            transition_pool_stride=2,
            theta=0.5,
            initial_conv_width=7,
            initial_stride=2,
            initial_filters=64,
            initial_pool_width=3,
            initial_pool_stride=2):
        model_input, output = _classifier_tensors(
            densenet.models.one_d.DenseNet201,
            (k, conv_kernel_width, bottleneck_size, transition_pool_size,
             transition_pool_stride, theta, initial_conv_width, initial_stride,
             initial_filters, initial_pool_width, initial_pool_stride),
            input_shape,
            num_outputs)
        super(DenseNet201, self).__init__(model_input, output)


class DenseNet264(keras.models.Model):
    """
    Create a Keras Model Object that is an implementation of DenseNet264.

    Parameters are identical to DenseNet121.
    """
    def __init__(
            self,
            input_shape,
            num_outputs=1000,
            k=32,
            conv_kernel_width=3,
            bottleneck_size=4,
            transition_pool_size=2,
            transition_pool_stride=2,
            theta=0.5,
            initial_conv_width=7,
            initial_stride=2,
            initial_filters=64,
            initial_pool_width=3,
            initial_pool_stride=2):
        model_input, output = _classifier_tensors(
            densenet.models.one_d.DenseNet264,
            (k, conv_kernel_width, bottleneck_size, transition_pool_size,
             transition_pool_stride, theta, initial_conv_width, initial_stride,
             initial_filters, initial_pool_width, initial_pool_stride),
            input_shape,
            num_outputs)
        super(DenseNet264, self).__init__(model_input, output)


class DenseNetCustom(keras.models.Model):
    """
    Create a Keras Model Object that is an implementation of DenseNet with a
    custom number of parameters. The number of layers per dense block can be
    specified by block_sizes.

    :param block_sizes: A list of ints with the number of layers in each block. Example: [5, 10, 25, 17]. Required.

    All other parameters are identical to DenseNet121.
    """
    def __init__(
            self,
            input_shape,
            num_outputs=1000,
            k=32,
            block_sizes=None,
            conv_kernel_width=3,
            bottleneck_size=4,
            transition_pool_size=2,
            transition_pool_stride=2,
            theta=0.5,
            initial_conv_width=7,
            initial_stride=2,
            initial_filters=64,
            initial_pool_width=3,
            initial_pool_stride=2):
        # block_sizes has no sensible default, so it is validated eagerly.
        if not block_sizes:
            raise ValueError("block_sizes must be specified")
        model_input, output = _classifier_tensors(
            densenet.models.one_d.DenseNet,
            (k, block_sizes, conv_kernel_width, bottleneck_size,
             transition_pool_size, transition_pool_stride, theta,
             initial_conv_width, initial_stride, initial_filters,
             initial_pool_width, initial_pool_stride),
            input_shape,
            num_outputs)
        # BUG FIX: the original called super(DenseNet264, self).__init__ here
        # (a copy-paste error), which raises TypeError because self is a
        # DenseNetCustom, not a DenseNet264.
        super(DenseNetCustom, self).__init__(model_input, output)