├── train
├── dataset
│ ├── train_list.txt
│ ├── val_list.txt
│ ├── gt
│ │ ├── 4_1_bc.png
│ │ └── 6_1_bc.png
│ └── img
│ │ ├── 4_1_bc.jpg
│ │ └── 6_1_bc.jpg
├── to_static
│ ├── ocrnet_hrnet_w18_512x512.yml
│ └── to_static.sh
└── 3180492.ipynb
├── requirements.txt
├── docs
└── img
│ └── logo.png
├── .gitmodules
├── .gitignore
└── README.md
/train/dataset/train_list.txt:
--------------------------------------------------------------------------------
1 | img/6_1_bc.jpg gt/6_1_bc.png
2 |
--------------------------------------------------------------------------------
/train/dataset/val_list.txt:
--------------------------------------------------------------------------------
1 | img/4_1_bc.jpg gt/4_1_bc.png
2 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python
2 | paddlepaddle>=2.2.0
3 | paddleseg
--------------------------------------------------------------------------------
/docs/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geoyee/buildseg/HEAD/docs/img/logo.png
--------------------------------------------------------------------------------
/train/dataset/gt/4_1_bc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geoyee/buildseg/HEAD/train/dataset/gt/4_1_bc.png
--------------------------------------------------------------------------------
/train/dataset/gt/6_1_bc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geoyee/buildseg/HEAD/train/dataset/gt/6_1_bc.png
--------------------------------------------------------------------------------
/train/dataset/img/4_1_bc.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geoyee/buildseg/HEAD/train/dataset/img/4_1_bc.jpg
--------------------------------------------------------------------------------
/train/dataset/img/6_1_bc.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/geoyee/buildseg/HEAD/train/dataset/img/6_1_bc.jpg
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "train/weight"]
2 | path = train/weight
3 | url = git@github.com:geoyee/buildseg-params.git
4 |
--------------------------------------------------------------------------------
/train/to_static/ocrnet_hrnet_w18_512x512.yml:
--------------------------------------------------------------------------------
1 | model:
2 | num_classes: 2 # 0 = background, 1 = building (matches the label pixel values described in train/3180492.ipynb)
3 | type: OCRNet
4 | backbone:
5 | type: HRNet_W18
6 | backbone_indices: [0]
7 |
8 | transforms: # preprocessing baked into the exported static model
9 | - type: Resize
10 | target_size: [512, 512] # model was trained on 512x512 tiles
11 | - type: Normalize
--------------------------------------------------------------------------------
/train/to_static/to_static.sh:
--------------------------------------------------------------------------------
1 | # Set up an available card
2 | # export CUDA_VISIBLE_DEVICES=0 # linux / macos (bash/zsh)
3 | set CUDA_VISIBLE_DEVICES=0 # windows (cmd.exe syntax; NOTE: under bash `set` does not export an env var — uncomment the `export` line above instead)
4 |
5 | # export static model (converts the dynamic *.pdparams weights to static *.pdmodel/*.pdiparams in static_weight/)
6 | python export.py --config ocrnet_hrnet_w18_512x512.yml \
7 | --model_path ocrnet_hrnet_w18_512x512_rs_building.pdparams \
8 | --save_dir static_weight
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # vscode
132 | *vscode
133 |
134 | # train/weight
135 | *.pdparams
136 |
137 | data
138 | *.tif
139 | *.tfw
140 | *.tif.aux.xml
141 | *.tif.vat.cpg
142 | *.tif.vat.dbf
143 | *.tif.xml
144 | *.tif.ovr
145 |
146 | buildseg/i18n/buildSeg.pro.user
147 |
148 | static_weight
149 | *.yaml
150 | *.pdmodel
151 | *.pdiparams
152 | *.pdiparams.info
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | 
4 | buildseg
5 |
6 |
7 |
8 | [](https://www.python.org/downloads/release/python-360/) [](https://www.python.org/downloads/release/python-360/) [](https://www.python.org/downloads/release/python-360/)
9 |
10 | buildseg is a Building Extraction plugin for QGIS based on PaddlePaddle.
11 |
12 | **Note: This repo will not be updated, and relevant work and maintenance updates are in [deepbands/buildseg](https://github.com/deepbands/buildseg).**
13 |
14 | 
15 |
16 | ## How to use
17 |
18 | 1. Download and install [QGIS](https://www.qgis.org/en/site/) and clone the repo :
19 | ``` git
20 | git clone git@github.com:geoyee/buildseg.git
21 | ```
22 |
23 | 2. Install requirements :
24 | - Enter the folder and install dependent libraries using OSGeo4W shell (Open As Administrator) :
25 | ``` shell
26 | cd buildseg
27 | pip install -r requirements.txt
28 | ```
29 | - Or open OSGeo4W shell as administrator and enter :
30 | ``` shell
31 | pip install opencv-python "paddlepaddle>=2.2.0" paddleseg --user
32 | ```
33 |
34 | 3. Copy the folder named buildseg into the QGIS configuration folder and enable the plugin from the plugin manager in QGIS (if it does not appear, restart QGIS).
35 | - You can know this folder from QGIS Setting Menu at the top-left of QGIS UI `Settings > User Profiles > Open Active Profile Folder` .
36 | - Go to `python/plugins` then paste the buildseg folder.
37 | - Full path should be like : `C:\Users\$USER\AppData\Roaming\QGIS\QGIS3\profiles\default\python\plugins\buildseg`.
38 |
39 | 4. Open QGIS, load your raster and select the parameter file ([*.pdiparams](https://cloud.a-boat.cn:2021/share/3xda5wmV)) then click `ok`.
40 |
41 | ## Model and Parameter
42 |
43 | | Model | Backbone | Resolution | mIoU | Params(MB) | Inference Time(ms) | Links |
44 | | :----: | :-------: | :--------: | :----: | :--------: | :----------------: | :----------------------------------------------------------: |
45 | | OCRNet | HRNet_W18 | 512x512 | 90.64% | 46.4 | / | [Static Weight](https://cloud.a-boat.cn:2021/share/3xda5wmV) |
46 |
47 | - Train/Eval Dataset : [Link](https://aistudio.baidu.com/aistudio/datasetdetail/102929).
48 | - We have done all testing and development using : Tesla V100 32G in [AI Studio](https://aistudio.baidu.com/aistudio/index).
49 |
50 | ## TODO
51 |
52 | - [x] Extract building on 512x512 remote sensing images.
53 | - [x] Extract buildings from large remote sensing images by splitting them into small tiles, extracting buildings per tile, then mosaicking (merging) the results back to the full extent.
54 | - [x] Replace the model and parameters (large-scale data).
55 | - [x] Convert to static weight (\*.pdiparams) instead of dynamic model (\*.pdparams).
56 | - [x] Add a Jupyter Notebook (\*.ipynb) about how to fine-tune parameters using others' datasets based on PaddleSeg.
57 | - [ ] Hole digging inside the polygons.
58 | - [ ] Convert raster to Shapefile/GeoJson by GDAL/OGR (gdal.Polygonize) instead of findContours in OpenCV.
59 | - [ ] Update plugin's UI :
60 | - [X] Add menu to select one raster file from QGIS opened raster layers.
61 | - [ ] Select the Parameter path one time (some buggy windows appear when importing the \*.pdiparams file).
62 | - [ ] Define the output path of the vector file (Direct Path or Temporary in the memory).
63 | - [ ] Accelerate, etc.
64 | - [ ] Add another model, like Vision Transform.
65 |
--------------------------------------------------------------------------------
/train/3180492.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "collapsed": false
7 | },
8 | "source": [
9 | "### 1. Install dependent libraries"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {
16 | "collapsed": false
17 | },
18 | "outputs": [],
19 | "source": [
20 | "# ! pip install -q paddleseg"
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {
26 | "collapsed": false
27 | },
28 | "source": [
29 | "### 2. Unzip datasets\n",
30 | "**Data organization**\n",
31 | "```\n",
32 | "dataset\n",
33 | " ├ img\n",
34 | " │ ├ build0.jpg\n",
35 | " │ └ ....jpg\n",
36 | " └ gt\n",
37 | " ├ build0.png\n",
38 | " └ ....png\n",
39 | "```\n",
40 | "**Label pixel**\n",
41 | "```\n",
42 | "0 background\n",
43 | "1 building\n",
44 | "```\n",
45 | "**Image size**\n",
46 | "```\n",
47 | "512x512\n",
48 | "```"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "metadata": {
55 | "collapsed": false
56 | },
57 | "outputs": [],
58 | "source": [
59 | "# ! mkdir -p dataset # create a folder for save dataset\n",
60 | "# ! unzip -oq data.zip -d dataset # unzip the zip data to the dataset folder"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "import os\n",
70 | "\n",
71 | "\n",
72 | "PATH = os.getcwd()\n",
73 | "print(PATH)"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {
79 | "collapsed": false
80 | },
81 | "source": [
82 | "### 3. Split datasets and create data_list"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "metadata": {
89 | "collapsed": false
90 | },
91 | "outputs": [],
92 | "source": [
93 | "import os\n",
94 | "import os.path as osp\n",
95 | "import random\n",
96 | "\n",
97 | "\n",
98 | "def create_list(data_path: str, val_num: int=2000) -> None:\n",
99 | " \"\"\" create list.\n",
100 | " args:\n",
101 | " data_path (str): dataset folder.\n",
102 | " val_num (int, optional): number of evaluation data.\n",
103 | " \"\"\"\n",
104 | " image_path = osp.join(data_path, \"img\")\n",
105 | " data_names = os.listdir(image_path)\n",
106 | " random.shuffle(data_names) # scramble data\n",
107 | " with open(os.path.join(data_path, \"train_list.txt\"), \"w\") as tf:\n",
108 | " with open(os.path.join(data_path, \"val_list.txt\"), \"w\") as vf:\n",
109 | " for idx, data_name in enumerate(data_names):\n",
110 | " img = os.path.join(\"img\", data_name)\n",
111 | "                lab = os.path.join(\"gt\", data_name.replace(\".jpg\", \".png\"))\n",
112 | " if idx < val_num:\n",
113 | " vf.write(img + \" \" + lab + \"\\n\")\n",
114 | " else:\n",
115 | " tf.write(img + \" \" + lab + \"\\n\")\n",
116 | " print(\"Data list generation completed\")\n",
117 | "\n",
118 | "\n",
119 | "create_list(osp.join(PATH, \"dataset\"), 1)"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "metadata": {
125 | "collapsed": false
126 | },
127 | "source": [
128 | "### 4. Create PaddlePaddle Dataset"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {
135 | "collapsed": false
136 | },
137 | "outputs": [],
138 | "source": [
139 | "import paddleseg.transforms as T\n",
140 | "from paddleseg.datasets import Dataset\n",
141 | "\n",
142 | "\n",
143 | "# build the training set\n",
144 | "train_transforms = [T.RandomHorizontalFlip(),\n",
145 | " T.RandomVerticalFlip(),\n",
146 | " T.RandomRotation(),\n",
147 | " T.RandomScaleAspect(),\n",
148 | " T.RandomBlur(),\n",
149 | " T.Resize(target_size=(512, 512)),\n",
150 | " T.Normalize()]\n",
151 | "train_dataset = Dataset(transforms=train_transforms,\n",
152 | " dataset_root=osp.join(PATH, \"dataset\"),\n",
153 | " num_classes=2,\n",
154 | " mode=\"train\",\n",
155 | " train_path=osp.join(PATH, \"dataset/train_list.txt\"),\n",
156 | " separator=\" \")\n",
157 | "\n",
158 | "# build validation set\n",
159 | "val_transforms = [T.Resize(target_size=(512, 512)),\n",
160 | " T.Normalize()]\n",
161 | "val_dataset = Dataset(transforms=val_transforms,\n",
162 | " dataset_root=osp.join(PATH, \"dataset\"),\n",
163 | " num_classes=2,\n",
164 | " mode=\"val\",\n",
165 | " val_path=osp.join(PATH, \"dataset/val_list.txt\"),\n",
166 | " separator=\" \")"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {
172 | "collapsed": false
173 | },
174 | "source": [
175 | "### 5. Select model"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {
182 | "collapsed": false
183 | },
184 | "outputs": [],
185 | "source": [
186 | "import paddle\n",
187 | "from paddleseg.models import OCRNet, HRNet_W18\n",
188 | "\n",
189 | "\n",
190 | "model = OCRNet(num_classes=2,\n",
191 | " backbone=HRNet_W18(),\n",
192 | " backbone_indices=[0],\n",
193 | " pretrained=osp.join(PATH, \"weight/ocrnet_hrnet_w18_512x512_rs_building.pdparams\"))"
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "metadata": {
199 | "collapsed": false
200 | },
201 | "source": [
202 | "### 6. Set super-parameters"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {
209 | "collapsed": false
210 | },
211 | "outputs": [],
212 | "source": [
213 | "from paddleseg.models.losses import MixedLoss, BCELoss, DiceLoss\n",
214 | "\n",
215 | "\n",
216 | "base_lr = 3e-5\n",
217 | "epochs = 5\n",
218 | "batch_size = 1\n",
219 | "\n",
220 | "iters = epochs * len(train_dataset) // batch_size\n",
221 | "lr = paddle.optimizer.lr.PolynomialDecay(base_lr, decay_steps=iters // epochs, power=0.9)\n",
222 | "optimizer = paddle.optimizer.Adam(lr, parameters=model.parameters())\n",
223 | "losses = {}\n",
224 | "losses[\"types\"] = [MixedLoss([BCELoss(), DiceLoss()], [1, 1])] * 2\n",
225 | "losses[\"coef\"] = [1] * 2"
226 | ]
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "metadata": {
231 | "collapsed": false
232 | },
233 | "source": [
234 | "### 7. Train"
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": null,
240 | "metadata": {
241 | "collapsed": false
242 | },
243 | "outputs": [],
244 | "source": [
245 | "from paddleseg.core import train\n",
246 | "\n",
247 | "\n",
248 | "train(model=model,\n",
249 | " train_dataset=train_dataset,\n",
250 | " val_dataset=val_dataset,\n",
251 | " optimizer=optimizer,\n",
252 | " save_dir=osp.join(PATH, \"output\"),\n",
253 | " iters=iters,\n",
254 | " batch_size=batch_size,\n",
255 | " save_interval=iters // 5,\n",
256 | " log_iters=10,\n",
257 | " num_workers=0,\n",
258 | " losses=losses,\n",
259 | " use_vdl=True)"
260 | ]
261 | }
262 | ],
263 | "metadata": {
264 | "interpreter": {
265 | "hash": "779fd89e231d504f3761036cc866499d7be8785d89f9802b1abf9e02a6b7fe30"
266 | },
267 | "kernelspec": {
268 | "display_name": "Python 3",
269 | "language": "python",
270 | "name": "python3"
271 | },
272 | "language_info": {
273 | "codemirror_mode": {
274 | "name": "ipython",
275 | "version": 3
276 | },
277 | "file_extension": ".py",
278 | "mimetype": "text/x-python",
279 | "name": "python",
280 | "nbconvert_exporter": "python",
281 | "pygments_lexer": "ipython3",
282 | "version": "3.8.10"
283 | }
284 | },
285 | "nbformat": 4,
286 | "nbformat_minor": 1
287 | }
288 |
--------------------------------------------------------------------------------