├── shapefile
├── MEIC-0P25.cpg
├── CN-Province.dbf
├── CN-Province.sbn
├── CN-Province.sbx
├── CN-Province.shp
├── CN-Province.shx
├── MEIC-0P25.dbf
├── MEIC-0P25.sbn
├── MEIC-0P25.sbx
├── MEIC-0P25.shp
├── MEIC-0P25.shx
├── MEIC-0P25.prj
└── CN-Province.prj
├── Doc
├── 7-1.png
├── 7-2.png
├── 7-3.png
├── 7-4.png
├── 7-5.png
├── 7-6.png
├── zonalst_nox.png
├── monthly_profile.png
├── speices_warning.png
├── year2month_output.png
├── meic2geotiff_output.png
├── meic_2017_PMcoarse.png
├── original_meic_files.png
├── converted_meic_files.png
├── how_to_treat_the_emssion_which_resolution_is_fine_cn.md
├── how_to_do_vertical_allocation_cn.md
├── how_to_use_shapefile_for_mask_cn.md
├── how_to_treat_the_yearly_emission_cn.md
├── how_to_combine_meic_and_mix_cn.md
├── adopt_local_emission_to_meic_cn.md
└── adopt_meic_for_prd_emission_file_cn.md
├── .gitattributes
├── UTIL
├── combine
│ ├── src.pyd
│ └── combine.py
└── rename_original_inventory
│ ├── src.pyd
│ ├── README.md
│ └── rename_original_inventory_(pollutant).py
├── requirement.txt
├── profile.csv
├── input
├── GRIDDESC.CN27km
└── GRIDDESC.PRD274x181
├── temporal
├── weekly.csv
├── monthly.csv
└── hourly.csv
├── coarse_emission_2_fine_emission.py
├── Create-CMAQ-Emission-File.py
├── fine_emission_2_coarse_emission.py
├── vertical_allocation.py
├── allocator
└── README.md
├── Create_CMAQ_mask.py
├── .gitignore
├── namelist.input
├── README.rst
├── species
├── MEIC-CB05_CB06_speciate_agriculture.csv
├── MEIC-CB05_CB06_speciate_industry.csv
├── MEIC-CB05_CB06_speciate_power.csv
├── MEIC-CB05_CB06_speciate_residential.csv
├── MEIC-CB05_CB06_speciate_transportation.csv
├── MEIC-SAPRC07_SAPRC07_speciate_power.csv
├── MEIC-SAPRC07_SAPRC07_speciate_agriculture.csv
├── MEIC-SAPRC07_SAPRC07_speciate_industry.csv
├── MEIC-SAPRC07_SAPRC07_speciate_residential.csv
└── MEIC-SAPRC07_SAPRC07_speciate_transportation.csv
├── README.CN.md
├── PREP
├── heic_2_GeoTiff.py
├── meic_2_GeoTiff.py
├── mix_2_GeoTiff.py
└── CEDSv2_2_GeoTiff.py
├── year2month.py
├── calculate-pmc.py
└── src.py
/shapefile/MEIC-0P25.cpg:
--------------------------------------------------------------------------------
1 | UTF-8
--------------------------------------------------------------------------------
/Doc/7-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/7-1.png
--------------------------------------------------------------------------------
/Doc/7-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/7-2.png
--------------------------------------------------------------------------------
/Doc/7-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/7-3.png
--------------------------------------------------------------------------------
/Doc/7-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/7-4.png
--------------------------------------------------------------------------------
/Doc/7-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/7-5.png
--------------------------------------------------------------------------------
/Doc/7-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/7-6.png
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/Doc/zonalst_nox.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/zonalst_nox.png
--------------------------------------------------------------------------------
/UTIL/combine/src.pyd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/UTIL/combine/src.pyd
--------------------------------------------------------------------------------
/Doc/monthly_profile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/monthly_profile.png
--------------------------------------------------------------------------------
/Doc/speices_warning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/speices_warning.png
--------------------------------------------------------------------------------
/Doc/year2month_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/year2month_output.png
--------------------------------------------------------------------------------
/shapefile/CN-Province.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/CN-Province.dbf
--------------------------------------------------------------------------------
/shapefile/CN-Province.sbn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/CN-Province.sbn
--------------------------------------------------------------------------------
/shapefile/CN-Province.sbx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/CN-Province.sbx
--------------------------------------------------------------------------------
/shapefile/CN-Province.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/CN-Province.shp
--------------------------------------------------------------------------------
/shapefile/CN-Province.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/CN-Province.shx
--------------------------------------------------------------------------------
/shapefile/MEIC-0P25.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/MEIC-0P25.dbf
--------------------------------------------------------------------------------
/shapefile/MEIC-0P25.sbn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/MEIC-0P25.sbn
--------------------------------------------------------------------------------
/shapefile/MEIC-0P25.sbx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/MEIC-0P25.sbx
--------------------------------------------------------------------------------
/shapefile/MEIC-0P25.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/MEIC-0P25.shp
--------------------------------------------------------------------------------
/shapefile/MEIC-0P25.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/shapefile/MEIC-0P25.shx
--------------------------------------------------------------------------------
/Doc/meic2geotiff_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/meic2geotiff_output.png
--------------------------------------------------------------------------------
/Doc/meic_2017_PMcoarse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/meic_2017_PMcoarse.png
--------------------------------------------------------------------------------
/Doc/original_meic_files.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/original_meic_files.png
--------------------------------------------------------------------------------
/Doc/converted_meic_files.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/Doc/converted_meic_files.png
--------------------------------------------------------------------------------
/UTIL/rename_original_inventory/src.pyd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Airwhf/MEIAT-CMAQ/HEAD/UTIL/rename_original_inventory/src.pyd
--------------------------------------------------------------------------------
/requirement.txt:
--------------------------------------------------------------------------------
1 | f90nml==1.4.3
2 | pyioapi==0.2.2
3 | geopandas==0.12.2
4 | PseudoNetCDF==3.2.2
5 | rioxarray==0.14.1
6 | tqdm==4.64.1
7 |
--------------------------------------------------------------------------------
/profile.csv:
--------------------------------------------------------------------------------
1 | vglvltop,power,industry
2 | 1,0,0.06
3 | 0.995,0,0.16
4 | 0.99,0.0025,0.75
5 | 0.985,0.51,0.03
6 | 0.98,0.453,0
7 | 0.975,0.0325,0
8 | 0.97,0.002,0
9 |
--------------------------------------------------------------------------------
/shapefile/MEIC-0P25.prj:
--------------------------------------------------------------------------------
1 | GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]
--------------------------------------------------------------------------------
/input/GRIDDESC.CN27km:
--------------------------------------------------------------------------------
1 | ' '
2 | 'LamCon_34N_110E'
3 | 2 25.000 40.000 102.000 102.000 34.000
4 | ' '
5 | 'CN27km'
6 | 'LamCon_34N_110E' -2511000.000 -1775000.000 27000.000 27000.000 186 156 1
7 | ' '
8 |
--------------------------------------------------------------------------------
/input/GRIDDESC.PRD274x181:
--------------------------------------------------------------------------------
1 | ' '
2 | 'LamCon_40N_97W'
3 | 2 25.000 40.000 110.000 110.000 28.500
4 | ' '
5 | 'PRD274x181'
6 | 'LamCon_40N_97W' 48000.000 -902500.000 3000.000 3000.000 271 178 1
7 | ' '
8 |
--------------------------------------------------------------------------------
/temporal/weekly.csv:
--------------------------------------------------------------------------------
1 | weekly,power,industry,residential,transportation,agriculture
2 | 0,0.13,0.078,0.143,0.108,0.143
3 | 1,0.147,0.162,0.143,0.155,0.143
4 | 2,0.147,0.162,0.143,0.155,0.143
5 | 3,0.147,0.162,0.143,0.155,0.143
6 | 4,0.147,0.162,0.143,0.155,0.143
7 | 5,0.147,0.162,0.143,0.155,0.143
8 | 6,0.135,0.112,0.143,0.117,0.143
9 |
--------------------------------------------------------------------------------
/coarse_emission_2_fine_emission.py:
--------------------------------------------------------------------------------
1 | from src import *
2 |
3 | os.environ["IOAPI_ISPH"] = "6370000."
4 |
5 |
6 | if __name__ == '__main__':
7 | start_time = time.time()
8 | main_coarse2fine()
9 | end_time = time.time()
10 | elapsed_time = end_time - start_time
11 | print(f"### Time consuming: {elapsed_time} s ###")
12 |
--------------------------------------------------------------------------------
/UTIL/rename_original_inventory/README.md:
--------------------------------------------------------------------------------
1 | # 重命名原始排放清单文件名
2 |
3 | * 修改输入参数
4 | ```python
5 | tbl_name = {"org": ["PMcoarse"],
6 | "new": ["PMC"]}
7 |
8 | input_dir = r"H:\MEIC\GeoTiff-2017"
9 | output_dir = r"H:\MEIC\GeoTiff-2017_rename"
10 | ```
11 | * 运行代码
12 | ```shell
13 | cd UTIL/rename_original_inventory
14 | python ./rename_original_inventory_(pollutant).py
15 | ```
16 |
17 |
--------------------------------------------------------------------------------
/shapefile/CN-Province.prj:
--------------------------------------------------------------------------------
1 | PROJCS["China_Lambert_Conformal_Conic",GEOGCS["GCS_Beijing_1954",DATUM["D_Beijing_1954",SPHEROID["Krasovsky_1940",6378245.0,298.3]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",105.0],PARAMETER["Standard_Parallel_1",30.0],PARAMETER["Standard_Parallel_2",62.0],PARAMETER["Latitude_Of_Origin",0.0],UNIT["Meter",1.0]]
--------------------------------------------------------------------------------
/Create-CMAQ-Emission-File.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2023/3/15 16:34
4 | # @Author :Haofan Wang
5 | # @Email :wanghf58@mail2.sysu.edu.cn
6 |
7 | from src import *
8 |
9 | os.environ["IOAPI_ISPH"] = "6370000."
10 |
11 |
12 | if __name__ == "__main__":
13 | start_time = time.time()
14 | main_createCMAQ()
15 | end_time = time.time()
16 | elapsed_time = end_time - start_time
17 | print(f"### Time consuming: {elapsed_time} s ###")
18 |
--------------------------------------------------------------------------------
/temporal/monthly.csv:
--------------------------------------------------------------------------------
1 | monthly,power,industry,residential,transportation,agriculture
2 | 1,0.115,0.0833,0.0833,0.0833,0.12
3 | 2,0.115,0.0833,0.0833,0.0833,0.1
4 | 3,0.1,0.0833,0.0833,0.0833,0.05
5 | 4,0.095,0.0833,0.0833,0.0833,0.01
6 | 5,0.08,0.0833,0.0833,0.0833,0.15
7 | 6,0.05,0.0833,0.0833,0.0833,0.15
8 | 7,0.05,0.0833,0.0833,0.0833,0.05
9 | 8,0.05,0.0833,0.0833,0.0833,0.01
10 | 9,0.05,0.0833,0.0833,0.0833,0.01
11 | 10,0.09,0.0833,0.0833,0.0833,0.05
12 | 11,0.095,0.0833,0.0833,0.0833,0.15
13 | 12,0.11,0.0833,0.0833,0.0833,0.15
14 |
--------------------------------------------------------------------------------
/UTIL/rename_original_inventory/rename_original_inventory_(pollutant).py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2023/5/11 15:15
4 | # @Author :Haofan Wang
5 | # @Email :wanghf58@mail2.sysu.edu.cn
6 |
7 | from src import *
8 |
9 | if __name__ == "__main__":
10 |
11 | tbl_name = {"org": ["PMcoarse"],
12 | "new": ["PMC"]}
13 |
14 | input_dir = r"H:\MEIC\GeoTiff-2017"
15 | output_dir = r"H:\MEIC\GeoTiff-2017_rename"
16 |
17 | main_rename_original_pollutant(tbl_name, input_dir, output_dir)
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/fine_emission_2_coarse_emission.py:
--------------------------------------------------------------------------------
1 | # @Time : 2023/02/21 19:17
2 | # @Author : Haofan Wang
3 | # @Version : python3.9
4 | # @Email : wanghf58@mail2.sysu.edu.cn
5 | import os
6 |
7 | from src import *
8 |
9 | os.environ["IOAPI_ISPH"] = "6370000."
10 | # Ignore the warning information from pandas.
11 | pd.options.mode.chained_assignment = None
12 |
13 |
14 | if __name__ == "__main__":
15 | start_time = time.time()
16 | main_f2c()
17 | end_time = time.time()
18 | elapsed_time = end_time - start_time
19 | print(f"### Time consuming: {elapsed_time} s ###")
20 |
--------------------------------------------------------------------------------
/vertical_allocation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2022/11/22 16:03
4 | # @Author :Haofan Wang
5 |
6 | from src import *
7 |
8 | if __name__ == "__main__":
9 | sectors = ['power', 'industry']
10 |
11 | start_time = time.time()
12 | for sector in sectors:
13 | files = glob.glob(fr"output\*_{sector}_*.nc")
14 | main_vertical_allocation(files)
15 |
16 | end_time = time.time()
17 | elapsed_time = end_time - start_time
18 | print(f"### Time consuming: {elapsed_time} s ###")
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/Doc/how_to_treat_the_emssion_which_resolution_is_fine_cn.md:
--------------------------------------------------------------------------------
1 | # 7.原始清单分辨率如果小于模拟域网格分辨率应该如何处理?
2 |
3 | ------------------------
4 |
5 | **作者:邱嘉馨**
6 |
7 | ------------------------
8 |
9 | 1.首先,修改namelist.input,设置好网格信息文件GRIDDESC以及网格名等变量,以及时间分配系数文件和物种分配系数文件等,具体可参考步骤一。
10 |
11 | 
12 |
13 | 2.打开终端,运行将精细化清单处理到粗网格程序python ./fine_emission_2_coarse_emission.py
14 |
15 | 
16 |
17 | 如上图所示,提示找不到PMC,检查清单文件发现只有PM25和PM10,使用程序calculate-pmc.py计算生成PMC文件即可解决。
18 |
19 | 
20 |
21 | 
22 |
23 | 3.重新运行步骤2 ,出现如下界面即运行结束。在output文件夹下生成清单文件。
24 |
25 | 
26 |
27 | 
--------------------------------------------------------------------------------
/allocator/README.md:
--------------------------------------------------------------------------------
1 | # Allocator
2 |
3 | 这里是一个用于存放空间分配因子的目录。
4 |
5 | 在运行[coarse_emission_2_fine_emission.py](../coarse_emission_2_fine_emission.py)以前,你需要下载这些数据用于空间分配。
6 |
7 | # Download
8 |
9 | **您可以到以下链接中选择性下载您所需要的数据。**
10 | 链接:https://pan.baidu.com/s/1g2-YYE3uuw24iWw_82dedA?pwd=whff
11 |
12 | 1. 土地利用类型数据
13 | * 农田:agriculture.zip
14 | * 工矿用地:industry.zip or power.zip
15 |
16 | 2. 人口密度数据
17 | * 2017年人口密度数据集:landscan-global-2017_nodata.zip
18 | * 2018年人口密度数据集:landscan-global-2018_nodata.zip
19 |
20 | 3. 道路数据
21 | * 高速路:motorway.zip
22 | * 主干道:primary.zip
23 | * 二级干道:secondary.zip
24 | * 社区道路:residential.zip
25 | * 道路数据栅格化:roads-nodata-0.zip
26 |
27 | 4. 平均分配数据
28 | * 平均分配数据:ave-nodata-0.zip
29 |
--------------------------------------------------------------------------------
/Doc/how_to_do_vertical_allocation_cn.md:
--------------------------------------------------------------------------------
1 | # 如何对排放文件进行垂直分配?
2 |
3 | --------------------
4 |
5 | **作者:王浩帆**
6 |
7 | --------------------
8 |
9 | 面源排放的垂直分配过程是使用[vertical_allocation.py](../vertical_allocation.py)
10 |
11 | * 程序提供了power和industry两个部门的垂直分配方案,分别是`profile-industry.csv`和`profile-power.csv`,用户也可以按照已提供的两个文件格式自定义垂直分配系数。
12 |
13 | 由于[vertical_allocation.py](../vertical_allocation.py)只能够识别文件名为`profile.csv`的文件,所以在进行分配时,需要将文件复制为`profile.csv`。
14 |
15 | 以下步骤通过对power部门的排放来进行说明:
16 |
17 | 1. 复制`profile-power.csv`到`profile.csv`。
18 |
19 | 2. 打开[vertical_allocation.py](../vertical_allocation.py)修改以下代码:
20 |
21 | ```python
22 | files = glob.glob(r"output\*_power_*.nc")
23 | ```
24 |
25 | 3. 在终端输入以下命令即可开始运行。
26 |
27 | ```shell
28 | python .\vertical_allocation.py
29 | ```
30 |
31 | 4. 在`output/vertical`路径下可以看到垂直分配后的结果。
32 |
--------------------------------------------------------------------------------
/Create_CMAQ_mask.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2023/5/11 13:27
4 | # @Author :Haofan Wang
5 | # @Email :wanghf58@mail2.sysu.edu.cn
6 |
7 |
8 | from src import *
9 |
10 | os.environ['IOAPI_ISPH'] = '6370000.'
11 |
12 | if __name__ == "__main__":
13 | # ------------------------------------------------------------
14 | # Shapefile path.
15 | shapefile_path = "shapefile/Anyang-around"
16 | # The field of each area.
17 | field = "NAME"
18 | # The output path.
19 | output_name = "output/mask.nc"
20 | # ------------------------------------------------------------
21 |
22 | start_time = time.time()
23 | main_create_CMAQ_mask(shapefile_path, field, output_name)
24 | end_time = time.time()
25 | elapsed_time = end_time - start_time
26 | print(f"### Time consuming: {elapsed_time} s ###")
27 |
28 |
29 |
--------------------------------------------------------------------------------
/Doc/how_to_use_shapefile_for_mask_cn.md:
--------------------------------------------------------------------------------
1 | # 如何使用shapefile文件生成CMAQ中的mask文件?
2 |
3 | ------------------------
4 |
5 | **作者:邱嘉馨**
6 |
7 | ------------------------
8 |
9 | ## 1. 准备shapefile文件
10 |
11 | shapefile文件主要有以下几点要求:
12 |
13 | * 投影必须为WGS1984投影。
14 | * 属性表中不能有中文字符。
15 | * 属性表中必须有字符串字段被程序提取。
16 | * 该字符串字段中的必须为大写字母。
17 |
18 | 具体格式可以参考:[Anyang-around.shp](../shapefile/Anyang-around.shp)
19 |
20 | ## 2. 配置好文件
21 |
22 | 除了上述需要在代码中修改的部分以外, `GRIDDESC`文件也是程序所必须的,但是此文件将从namelist.input中去获取。
23 |
24 | ```python
25 | # ------------------------------------------------------------
26 | # Shapefile path.
27 | shapefile_path = "shapefile文件路径"
28 | # The field of each area.
29 | field = "字符串字段名称"
30 | # The output path.
31 | output_name = "输出文件名称"
32 | # ------------------------------------------------------------
33 | ```
34 |
35 | ## 3. 运行程序
36 |
37 | 在终端中输入:
38 |
39 | ```shell
40 | python ./Create_CMAQ_mask.py
41 | ```
42 |
43 | 即可运行,并输出文件。
44 |
45 |
46 |
--------------------------------------------------------------------------------
/temporal/hourly.csv:
--------------------------------------------------------------------------------
1 | hourly,power,industry,residential,transportation,agriculture
2 | 0,0.032,0.026,0.038,0.017,0.026
3 | 1,0.03,0.007,0.038,0.013,0.019
4 | 2,0.029,0.007,0.03,0.014,0.019
5 | 3,0.028,0.007,0.045,0.015,0.018
6 | 4,0.029,0.007,0.045,0.016,0.019
7 | 5,0.032,0.007,0.038,0.016,0.021
8 | 6,0.035,0.007,0.03,0.029,0.029
9 | 7,0.04,0.029,0.03,0.056,0.033
10 | 8,0.0433,0.045,0.038,0.0599,0.0473
11 | 9,0.0457,0.068,0.038,0.059,0.0576
12 | 10,0.0479,0.068,0.03,0.0594,0.07
13 | 11,0.0495,0.068,0.045,0.0501,0.0885
14 | 12,0.0495,0.068,0.045,0.0501,0.0885
15 | 13,0.0497,0.068,0.038,0.0588,0.0823
16 | 14,0.0501,0.068,0.03,0.06,0.0803
17 | 15,0.05,0.068,0.03,0.062,0.07
18 | 16,0.0497,0.068,0.038,0.0594,0.0597
19 | 17,0.0489,0.066,0.075,0.0574,0.0453
20 | 18,0.0477,0.063,0.075,0.0557,0.0309
21 | 19,0.0473,0.037,0.075,0.049,0.0268
22 | 20,0.0466,0.037,0.075,0.0454,0.0226
23 | 21,0.044,0.037,0.054,0.0417,0.0206
24 | 22,0.0397,0.037,0.018,0.0308,0.0206
25 | 23,0.0352,0.037,0.018,0.0216,0.0206
26 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | __pycache__/
3 | allocator/*.tif
4 | allocator/*.zip
5 | output/
6 | allocator/motorway.cpg
7 | allocator/motorway.dbf
8 | allocator/motorway.prj
9 | allocator/motorway.sbn
10 | allocator/motorway.sbx
11 | allocator/motorway.shp
12 | allocator/motorway.shx
13 | allocator/primary.cpg
14 | allocator/primary.dbf
15 | allocator/primary.prj
16 | allocator/primary.sbn
17 | allocator/primary.sbx
18 | allocator/primary.shp
19 | allocator/primary.shx
20 | allocator/residential.cpg
21 | allocator/residential.dbf
22 | allocator/residential.prj
23 | allocator/residential.sbn
24 | allocator/residential.sbx
25 | allocator/residential.shp
26 | allocator/residential.shx
27 | allocator/secondary.cpg
28 | allocator/secondary.dbf
29 | allocator/secondary.prj
30 | allocator/secondary.sbn
31 | allocator/secondary.sbx
32 | allocator/secondary.shp
33 | allocator/secondary.shx
34 | comparison/emission_transportation_20220102.nc
35 | build/
36 | src.c
37 | test.py
38 | input/GRIDDESC.CN245x195
39 | mask.nc
40 |
--------------------------------------------------------------------------------
/namelist.input:
--------------------------------------------------------------------------------
1 | &global
2 | griddesc_file = "input/GRIDDESC.PRD274x181"
3 | griddesc_name = "PRD274x181"
4 | big_grid_file = "shapefile/MEIC-0P25.shp"
5 | geotiff_dir = "H:/MEIC/GeoTiff-2017"
6 | inventory_label = "MEIC"
7 | inventory_year = "2017"
8 | sectors = 'transportation', 'residential', 'power', 'agriculture', 'industry'
9 | allocator = 'line', 'landscan-global-2017_nodata.tif', 'power.tif', 'agriculture.tif', 'industry.tif',
10 | allocator_type = "line", "raster", "raster", "raster", "raster"
11 | inventory_mechanism = "MEIC-CB05"
12 | target_mechanism = "CB06"
13 | start_date = "2020-07-01"
14 | end_date = "2020-07-02"
15 | cores = 4
16 | /
17 |
18 | &line
19 | line_files = "motorway.shp", "primary.shp", "residential.shp", "secondary.shp"
20 | line_factors = 0.435798, 0.326848, 0.081712, 0.155642,
21 | /
22 |
23 | &control
24 | create_grid = 1,
25 | grid_info = 1,
26 | create_factor = 1,
27 | coarse_emission = 1,
28 | create_source = 1,
29 | /
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | MEIAT-CMAQ v1.4-OA User's Guide
2 | --------------------------------
3 | |doi|
4 |
5 | .. |DOI| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.8001532.svg
6 | :target: https://doi.org/10.5281/zenodo.8001532
7 |
8 | MEIAT-CMAQ: 一款针对CMAQ模型的排放清单分配工具。
9 |
10 | 作者: `王浩帆 `_,邱嘉馨
11 |
12 | **联系作者**
13 |
14 | Email: wanghf58@mail2.sysu.edu.cn
15 |
16 | QQ群: 805594259
17 |
18 | 微信公众号:小王的科研笔记
19 |
20 | 如何引用(这真的很重要!!!)
21 | ============================
22 |
23 | **非常重要**:如果大家使用或者参考了本工具进行了一些工作,请大家引用下面的文章,这对我真的非常重要,谢谢!!!
24 |
25 | **Wang, H.**, Qiu, J., Liu, Y., Fan, Q., Lu, X., Zhang, Y., Wu, K., ..., & Sun, J., Wang, H., (2024). MEIAT-CMAQ: A Modular Emission Inventory Allocation Tool for Community Multiscale Air Quality Model. Atmospheric Environment, 2024, 120604.
26 |
27 | url: https://doi.org/10.1016/j.atmosenv.2024.120604
28 |
29 |
30 | 教程合集
31 | =======
32 |
33 | `点击此处进行查看。 `_
34 |
35 |
36 | 反馈问题
37 | =======
38 |
39 | 如果在您的使用过程中遇到了任何问题,推荐通过`此网页 `_进行反馈,我会及时回复。
40 |
41 |
--------------------------------------------------------------------------------
/Doc/how_to_treat_the_yearly_emission_cn.md:
--------------------------------------------------------------------------------
1 | # 如何处理只有年总量的排放清单?
2 |
3 | ------------------------
4 |
5 | **作者:邱嘉馨**
6 |
7 | ------------------------
8 |
9 | 本教程讲解如何使用年总量的排放清单。
10 |
11 | 1. 首先,使用[meic_2_GeoTiff.py](../PREP/meic_2_GeoTiff.py)将meic年清单的asc格式转为tiff(注意检查输出文件名的格式,部门之间是两个下划线)。
12 |
13 | ```python
14 | input_dir = r"MEIC清单asc格式文件存放位置"
15 | output_dir = r"MEIC清单tiff格式文件的输出位置"
16 | ```
17 | 配置好代码以后,在终端中输入命令:
18 |
19 | ```shell
20 | python ./PREP/meic_2_GeoTiff.py
21 | ```
22 |
23 | 运行成功以后将会在输出路径下看到GeoTiff格式文件,如图所示:
24 | 
25 |
26 | 2. 使用[year2month.py](../year2month.py)将年清单转换为月清单。
27 |
28 | 设置文件读取和输出路径:
29 |
30 | ```python
31 | input_dir = r"年清单的tiff格式文件的存放路径"
32 | output_dir = r"月清单的输出路径"
33 | ```
34 |
35 | 设置月分配系数:
36 |
37 | 月时间分配系数文件为[monthly.csv](../temporal/monthly.csv),可以通过修改此文件设置不同部门的
38 | 月分配系数,注意列标题和清单文件名中的部门拼写保持一致。
39 |
40 | 
41 |
42 | 如上配置好代码和时间分配系数表以后,在终端中输入以下命令并运行即可。
43 |
44 | ```shell
45 | python ./year2month.py
46 | ```
47 |
48 | 运行成功以后,将会在输出路径下看到以下逐月的清单文件:
49 | 
50 |
51 | 3. 后续步骤见表1。
--------------------------------------------------------------------------------
/species/MEIC-CB05_CB06_speciate_agriculture.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | CB05_ALD2,ALD2,1,44.03499,Mmol,mol/s
3 | CB05_ALDX,ALDX,1,39.22808,Mmol,mol/s
4 | CB05_ETH,ETH,1,28.053,Mmol,mol/s
5 | CB05_ETOH,ETOH,1,30.069,Mmol,mol/s
6 | CB05_FORM,FORM,1,45.8447,Mmol,mol/s
7 | CB05_IOLE,IOLE,1,30.026,Mmol,mol/s
8 | CB05_ISOP,ISOP,1,57.09965,Mmol,mol/s
9 | CB05_MEOH,MEOH,1,68.117,Mmol,mol/s
10 | CB05_OLE,OLE,1,32.042,Mmol,mol/s
11 | CB05_PAR,PAR,1,0.992943,Mmol,mol/s
12 | CB05_TERP,TERP,1,29.63422,Mmol,mol/s
13 | CB05_TOL,TOL,1,15.11587,Mmol,mol/s
14 | CB05_UNR,UNR,1,136.2352,Mmol,mol/s
15 | CB05_XYL,XYL,1,96.79334,Mmol,mol/s
16 | PM25,PAL,1,0,Mg,g/s
17 | PM25,PCA,1,0,Mg,g/s
18 | PM25,PCL,1,0,Mg,g/s
19 | PM25,PFE,1,0,Mg,g/s
20 | PM25,PH2O,1,0,Mg,g/s
21 | PM25,PK,1,0,Mg,g/s
22 | PM25,PMG,1,0,Mg,g/s
23 | PM25,PMN,1,0,Mg,g/s
24 | PM25,PMOTHR,1,0,Mg,g/s
25 | PM25,PNA,1,0,Mg,g/s
26 | PM25,PNCOM,1,0,Mg,g/s
27 | PM25,PNH4,1,0,Mg,g/s
28 | PM25,PNO3,1,0,Mg,g/s
29 | PM25,PSI,1,0,Mg,g/s
30 | PM25,PSO4,1,0,Mg,g/s
31 | PM25,PTI,1,0,Mg,g/s
32 | SO2,SO2,0,64,Mg,mol/s
33 | SO2,SULF,0,98,Mg,mol/s
34 | NH3,NH3,1,17,Mg,mol/s
35 | CO,CO,0,28,Mg,mol/s
36 | PMC,PMC,0,1,Mg,g/s
37 | NOx,NO,0,30,Mg,mol/s
38 | NOx,NO2,0,46,Mg,mol/s
39 | NOx,HONO,0,47,Mg,mol/s
40 | OC,POC,0,1,Mg,g/s
41 |
--------------------------------------------------------------------------------
/species/MEIC-CB05_CB06_speciate_industry.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | CB05_ALD2,ALD2,1,44.03499,Mmol,mol/s
3 | CB05_ALDX,ALDX,1,39.22808,Mmol,mol/s
4 | CB05_ETH,ETH,1,28.053,Mmol,mol/s
5 | CB05_ETOH,ETOH,1,30.069,Mmol,mol/s
6 | CB05_FORM,FORM,1,45.8447,Mmol,mol/s
7 | CB05_IOLE,IOLE,1,30.026,Mmol,mol/s
8 | CB05_ISOP,ISOP,1,57.09965,Mmol,mol/s
9 | CB05_MEOH,MEOH,1,68.117,Mmol,mol/s
10 | CB05_OLE,OLE,1,32.042,Mmol,mol/s
11 | CB05_PAR,PAR,1,0.992943,Mmol,mol/s
12 | CB05_TERP,TERP,1,29.63422,Mmol,mol/s
13 | CB05_TOL,TOL,1,15.11587,Mmol,mol/s
14 | CB05_UNR,UNR,1,136.2352,Mmol,mol/s
15 | CB05_XYL,XYL,1,96.79334,Mmol,mol/s
16 | PM25,PAL,0.030838,1,Mg,g/s
17 | PM25,PCA,0.04692,1,Mg,g/s
18 | PM25,PCL,0.005623,1,Mg,g/s
19 | PM25,PFE,0.029831,1,Mg,g/s
20 | PM25,PH2O,0.007782,1,Mg,g/s
21 | PM25,PK,0.020635,1,Mg,g/s
22 | PM25,PMG,0.001488,1,Mg,g/s
23 | PM25,PMN,0.001368,1,Mg,g/s
24 | PM25,PMOTHR,0.569705,1,Mg,g/s
25 | PM25,PNA,0.01045,1,Mg,g/s
26 | PM25,PNCOM,0.078912,1,Mg,g/s
27 | PM25,PNH4,0.001819,1,Mg,g/s
28 | PM25,PNO3,0.004444,1,Mg,g/s
29 | PM25,PSI,0.089135,1,Mg,g/s
30 | PM25,PSO4,0.096941,1,Mg,g/s
31 | PM25,PTI,0.00411,1,Mg,g/s
32 | SO2,SO2,1,64,Mg,mol/s
33 | SO2,SULF,0.00919,98,Mg,mol/s
34 | NH3,NH3,1,17,Mg,mol/s
35 | CO,CO,1,28,Mg,mol/s
36 | PMC,PMC,1,1,Mg,g/s
37 | NOx,NO,0.9,30,Mg,mol/s
38 | NOx,NO2,0.092,46,Mg,mol/s
39 | NOx,HONO,0.008,47,Mg,mol/s
40 | OC,POC,1,1,Mg,g/s
41 |
--------------------------------------------------------------------------------
/species/MEIC-CB05_CB06_speciate_power.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | CB05_ALD2,ALD2,1,44.03499,Mmol,mol/s
3 | CB05_ALDX,ALDX,1,39.22808,Mmol,mol/s
4 | CB05_ETH,ETH,1,28.053,Mmol,mol/s
5 | CB05_ETOH,ETOH,1,30.069,Mmol,mol/s
6 | CB05_FORM,FORM,1,45.8447,Mmol,mol/s
7 | CB05_IOLE,IOLE,1,30.026,Mmol,mol/s
8 | CB05_ISOP,ISOP,1,57.09965,Mmol,mol/s
9 | CB05_MEOH,MEOH,1,68.117,Mmol,mol/s
10 | CB05_OLE,OLE,1,32.042,Mmol,mol/s
11 | CB05_PAR,PAR,1,0.992943,Mmol,mol/s
12 | CB05_TERP,TERP,1,29.63422,Mmol,mol/s
13 | CB05_TOL,TOL,1,15.11587,Mmol,mol/s
14 | CB05_UNR,UNR,1,136.2352,Mmol,mol/s
15 | CB05_XYL,XYL,1,96.79334,Mmol,mol/s
16 | PM25,PAL,0.050235,1,Mg,g/s
17 | PM25,PCA,0.034628,1,Mg,g/s
18 | PM25,PCL,0.001152,1,Mg,g/s
19 | PM25,PFE,0.025104,1,Mg,g/s
20 | PM25,PH2O,3.95E-05,1,Mg,g/s
21 | PM25,PK,0.004844,1,Mg,g/s
22 | PM25,PMG,0.000459,1,Mg,g/s
23 | PM25,PMN,0.000248,1,Mg,g/s
24 | PM25,PMOTHR,0.588929,1,Mg,g/s
25 | PM25,PNA,0.000116,1,Mg,g/s
26 | PM25,PNCOM,0.043903,1,Mg,g/s
27 | PM25,PNH4,0.003699,1,Mg,g/s
28 | PM25,PNO3,0.006201,1,Mg,g/s
29 | PM25,PSI,0.0774444,1,Mg,g/s
30 | PM25,PSO4,0.159122,1,Mg,g/s
31 | PM25,PTI,0.003877,1,Mg,g/s
32 | SO2,SO2,1,64,Mg,mol/s
33 | SO2,SULF,0.0218,98,Mg,mol/s
34 | NH3,NH3,1,17,Mg,mol/s
35 | CO,CO,1,28,Mg,mol/s
36 | PMC,PMC,1,1,Mg,g/s
37 | NOx,NO,0.9,30,Mg,mol/s
38 | NOx,NO2,0.092,46,Mg,mol/s
39 | NOx,HONO,0.008,47,Mg,mol/s
40 | OC,POC,1,1,Mg,g/s
41 |
--------------------------------------------------------------------------------
/species/MEIC-CB05_CB06_speciate_residential.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | CB05_ALD2,ALD2,1,44.03499,Mmol,mol/s
3 | CB05_ALDX,ALDX,1,39.22808,Mmol,mol/s
4 | CB05_ETH,ETH,1,28.053,Mmol,mol/s
5 | CB05_ETOH,ETOH,1,30.069,Mmol,mol/s
6 | CB05_FORM,FORM,1,45.8447,Mmol,mol/s
7 | CB05_IOLE,IOLE,1,30.026,Mmol,mol/s
8 | CB05_ISOP,ISOP,1,57.09965,Mmol,mol/s
9 | CB05_MEOH,MEOH,1,68.117,Mmol,mol/s
10 | CB05_OLE,OLE,1,32.042,Mmol,mol/s
11 | CB05_PAR,PAR,1,0.992943,Mmol,mol/s
12 | CB05_TERP,TERP,1,29.63422,Mmol,mol/s
13 | CB05_TOL,TOL,1,15.11587,Mmol,mol/s
14 | CB05_UNR,UNR,1,136.2352,Mmol,mol/s
15 | CB05_XYL,XYL,1,96.79334,Mmol,mol/s
16 | PM25,PAL,9.06E-04,1,Mg,g/s
17 | PM25,PCA,0.00343,1,Mg,g/s
18 | PM25,PCL,0.026758,1,Mg,g/s
19 | PM25,PFE,6.75E-04,1,Mg,g/s
20 | PM25,PH2O,2.86E-05,1,Mg,g/s
21 | PM25,PK,0.037723,1,Mg,g/s
22 | PM25,PMG,4.89E-04,1,Mg,g/s
23 | PM25,PMN,2.48E-05,1,Mg,g/s
24 | PM25,PMOTHR,0.075385,1,Mg,g/s
25 | PM25,PNA,5.56E-03,1,Mg,g/s
26 | PM25,PNCOM,0.793224,1,Mg,g/s
27 | PM25,PNH4,0.006773,1,Mg,g/s
28 | PM25,PNO3,0.006223,1,Mg,g/s
29 | PM25,PSI,0.015033,1,Mg,g/s
30 | PM25,PSO4,0.027662,1,Mg,g/s
31 | PM25,PTI,1.03E-04,1,Mg,g/s
32 | SO2,SO2,1,64,Mg,mol/s
33 | SO2,SULF,0.0148,98,Mg,mol/s
34 | NH3,NH3,1,17,Mg,mol/s
35 | CO,CO,1,28,Mg,mol/s
36 | PMC,PMC,1,1,Mg,g/s
37 | NOx,NO,0.9,30,Mg,mol/s
38 | NOx,NO2,0.092,46,Mg,mol/s
39 | NOx,HONO,0.008,47,Mg,mol/s
40 | OC,POC,1,1,Mg,g/s
41 |
--------------------------------------------------------------------------------
/species/MEIC-CB05_CB06_speciate_transportation.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | CB05_ALD2,ALD2,1,44.03499,Mmol,mol/s
3 | CB05_ALDX,ALDX,1,39.22808,Mmol,mol/s
4 | CB05_ETH,ETH,1,28.053,Mmol,mol/s
5 | CB05_ETOH,ETOH,1,30.069,Mmol,mol/s
6 | CB05_FORM,FORM,1,45.8447,Mmol,mol/s
7 | CB05_IOLE,IOLE,1,30.026,Mmol,mol/s
8 | CB05_ISOP,ISOP,1,57.09965,Mmol,mol/s
9 | CB05_MEOH,MEOH,1,68.117,Mmol,mol/s
10 | CB05_OLE,OLE,1,32.042,Mmol,mol/s
11 | CB05_PAR,PAR,1,0.992943,Mmol,mol/s
12 | CB05_TERP,TERP,1,29.63422,Mmol,mol/s
13 | CB05_TOL,TOL,1,15.11587,Mmol,mol/s
14 | CB05_UNR,UNR,1,136.2352,Mmol,mol/s
15 | CB05_XYL,XYL,1,96.79334,Mmol,mol/s
16 | PM25,PAL,0.005602,1,Mg,g/s
17 | PM25,PCA,0.033303,1,Mg,g/s
18 | PM25,PCL,0.009179,1,Mg,g/s
19 | PM25,PFE,0.06222,1,Mg,g/s
20 | PM25,PH2O,0.003706,1,Mg,g/s
21 | PM25,PK,0.002276,1,Mg,g/s
22 | PM25,PMG,0.040729,1,Mg,g/s
23 | PM25,PMN,5.00E-04,1,Mg,g/s
24 | PM25,PMOTHR,0.264368,1,Mg,g/s
25 | PM25,PNA,0.002986,1,Mg,g/s
26 | PM25,PNCOM,0.375107,1,Mg,g/s
27 | PM25,PNH4,0.041369,1,Mg,g/s
28 | PM25,PNO3,0.016703,1,Mg,g/s
29 | PM25,PSI,0.043676,1,Mg,g/s
30 | PM25,PSO4,0.09631,1,Mg,g/s
31 | PM25,PTI,0.001967,1,Mg,g/s
32 | SO2,SO2,1,64,Mg,mol/s
33 | SO2,SULF,0,98,Mg,mol/s
34 | NH3,NH3,1,17,Mg,mol/s
35 | CO,CO,1,28,Mg,mol/s
36 | PMC,PMC,1,1,Mg,g/s
37 | NOx,NO,0.9,30,Mg,mol/s
38 | NOx,NO2,0.092,46,Mg,mol/s
39 | NOx,HONO,0.008,47,Mg,mol/s
40 | OC,POC,1,1,Mg,g/s
41 |
--------------------------------------------------------------------------------
/README.CN.md:
--------------------------------------------------------------------------------
1 | # 教程目录
2 |
3 | --------------
4 |
5 | 1. [配置MEIAT-CMAQ的运行环境。](https://mp.weixin.qq.com/s/1CEcoSDeCF9l-an_GD_EJg)
6 |
7 | * 此教程将详细讲解如何配置MEIAT-CMAQ的运行环境。
8 |
9 | --------------
10 |
11 | 2. [通过MEICv1.3的2017年排放清单制作珠江三角洲模拟域的CMAQ排放文件。](Doc/adopt_meic_for_prd_emission_file_cn.md)
12 |
13 | * 此教程会帮助用户了解最基础的CMAQ排放文件制作流程。
14 |
15 | --------------
16 |
17 | 3. [跳过空间分配步骤直接输出CMAQ排放文件。](Doc/how_to_treat_the_emssion_which_resolution_is_fine_cn.md)
18 |
19 | 	* 此教程主要是为了弥补**教程2**中的不足,在**教程2**中,我们仅能将较粗的排放清单降尺度到较细分辨率,而此教程则帮助我们了解如何使用MEIAT-CMAQ将较细分辨率的排放清单应用到较粗分辨率的模拟域中。
20 |
21 | --------------
22 |
23 | 4. [在MEIC清单中添加东亚地区的排放(MIX)。](Doc/how_to_combine_meic_and_mix_cn.md)
24 |
25 | * 此教程主要为引导用户学会对GeoTIFF文件进行批量镶嵌工作。
26 |
27 | --------------
28 |
29 | 5. [在MEIC清单中使用本地排放源。](Doc/adopt_local_emission_to_meic_cn.md)
30 |
31 | * 此教程将会引导用户学会如何灵活使用MEIAT-CMAQ使其能够同时使用表格化排放清单和网格化排放清单。
32 |
33 | --------------
34 |
35 | 6. [对年尺度的排放清单进行处理。](Doc/how_to_treat_the_yearly_emission_cn.md)
36 |
37 | * MEIAT-CMAQ只允许直接处理月尺度的排放清单,但是我们可以通过月时间分配文件将年尺度清单转换到月尺度清单。
38 |
39 | --------------
40 |
41 | 7. [将排放文件进行垂直分配。](Doc/how_to_do_vertical_allocation_cn.md)
42 |
43 | * 此教程将引导用户学会如何使用MEIAT-CMAQ对排放清单进行垂直分配。
44 |
45 | --------------
46 |
47 | 8. [生成CMAQ-ISAM可以使用的区域文件。](Doc/how_to_use_shapefile_for_mask_cn.md)
48 |
49 | * 制作区域文件一直是运行CMAQ-ISAM的一个难点,本教程将引导用户学会如何快速完成对区域文件的制作。
50 |
51 | --------------
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/Doc/how_to_combine_meic_and_mix_cn.md:
--------------------------------------------------------------------------------
1 | # 如何同时使用MEIC和MIX清单?
2 |
3 | ------------------------
4 |
5 | **作者:王浩帆**
6 |
7 | ------------------------
8 |
9 | MEIC清单仅为中国境内的排放清单,但是在模拟全国污染场的案例中,中国周边国家的排放是不容忽视的,因此需要通过MIX清单来对MEIC进行一个补充。
10 |
11 | 不论是模拟网格分辨率大于等于清单网格分辨率,还是模拟网格分辨率小于清单网格分辨率的情况,同时使用MEIC和MIX清单的关键步骤都是如何将MEIC清单镶嵌到MIX中,
12 | 作为一系列新的GeoTIFF文件来作为[coarse_emission_2_fine_emission.py](../coarse_emission_2_fine_emission.py)和
13 | [fine_emission_2_coarse_emission.py](../fine_emission_2_coarse_emission.py)的输入。
14 |
15 | **因此本部分将重点讲解如何使用工具来完成两个系列GeoTIFF的镶嵌工作。**
16 |
17 | 1. 将MIX清单和MEIC清单都转换为GeoTiff格式。
18 | * 使用[mix_2_GeoTiff.py](../PREP/mix_2_GeoTiff.py)将MIX清单转换为GeoTiff格式。
19 | * 使用[meic_2_GeoTiff.py](../PREP/meic_2_GeoTiff.py)将MEIC清单转换为GeoTiff格式。
20 | 由于MIX清单中没有PMC,因此需要通过[calculate-pmc.py](../calculate-pmc.py)将其计算出来。
21 |
22 | 2. 配置[combine.py](../UTIL/combine/combine.py)中的输入参数。
23 |
24 | * upper_raster_dir:上层GeoTiff所在目录路径。
25 | * bottom_raster_dir:下层GeoTiff所在目录路径。
26 | * output_dir:输出GeoTiff目录路径。
27 |
28 |
29 | * upper_raster_pollutants:上层GeoTiff需要合并的污染物名称。
30 | * bottom_raster_pollutants:下层GeoTiff所对应的污染物名称。
31 | * output_pollutants:一一对应到的输出污染物的名称。
32 |
33 |
34 | * upper_label:上层GeoTiff标签。
35 | * bottom_label:下层GeoTiff标签。
36 | * output_label:输出GeoTiff标签。
37 |
38 |
39 | * upper_raster_template:任意一个上层GeoTiff文件路径。
40 | * bottom_raster_template:任意一个下层GeoTiff文件路径。
41 |
42 |
43 | * upper_resolution:上层GeoTiff的分辨率。
44 | * bottom_resolution:下层GeoTiff的分辨率。
45 |
46 |
47 | * sectors:需要合并的部门。
48 |
49 |
50 | * upper_year:上层GeoTiff的年份。
51 | * bottom_year:下层GeoTiff的年份。
52 | * output_year:定义输出GeoTiff的年份。
53 |
54 |
55 | 3. 运行[combine.py](../UTIL/combine/combine.py)
56 |
57 | 在终端中输入:
58 | ```shell
59 | python ./combine.py
60 | ```
61 | 便可以开始运行程序,程序结束后将在`output_dir`中产生合并后的系列GeoTiff。
62 |
63 | 4. 进行空间分配、物种分配和时间分配。
64 |
65 | 此步骤和[第一个教程](adopt_meic_for_prd_emission_file_cn.md)或第二个教程中的步骤完全相同,不再赘述。
66 |
67 |
--------------------------------------------------------------------------------
/PREP/heic_2_GeoTiff.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2022/12/12 20:07
4 | # @Author :Haofan Wang
5 | import glob
6 | import os
7 |
8 | import xarray as xr
9 |
10 | import rasterio as rio
11 | from rasterio.transform import Affine
12 |
if __name__ == "__main__":
    # Convert the HEIC (high-resolution Chinese emission inventory) NetCDF
    # files into per-month, per-sector GeoTIFFs named
    # "HEIC_2017_MM__SECTOR__POLLUTANT.tiff".

    # Directory holding the original inventory NetCDF files.
    fdir = r"H:\MEIC\中国高分辨率大气污染物集成清单"

    # One NetCDF file per pollutant.
    files = glob.glob(f"{fdir}/*.nc")

    for file in files:
        ds = xr.open_dataset(file)
        lats = ds.coords["lat"].__array__()
        lons = ds.coords["lon"].__array__()
        lonmin, latmax, lonmax, latmin = lons.min(), lats.max(), lons.max(), lats.min()
        # BUG FIX: the raster dimensions are the number of grid cells along
        # each axis, not the "xllcorner"/"yllcorner" attributes (those are
        # the lower-left corner *coordinates* of an ESRI-style grid).  Use
        # the coordinate-array lengths, consistent with mix_2_GeoTiff.py.
        num_lon = lons.shape[0]
        num_lat = lats.shape[0]
        res = 0.1  # grid resolution in degrees

        # Anchor the transform on the lower-left cell edge.
        transform = Affine.translation(lonmin - res / 2, latmin - res / 2) * Affine.scale(res, res)

        # Sectors stored as "<POLLUTANT>_<SECTOR>" variables in the dataset.
        sectors = ["POWER", "INDUSTRY", "RESIDENTIAL", "TRANSPORTATION", "SOLVENT", "AGRICULTURE", "BIOMASS", "SHIPPING"]
        # The pollutant name is encoded in the file name ("<pollutant>_...nc").
        pollutant = os.path.basename(file).split("_")[0]

        for sector in sectors:
            for i in range(12):  # one band per month
                temp_ds = ds[f"{pollutant}_{sector}"][i, ...]
                mm = r"%.2d" % (i + 1)
                tiffile = f"HEIC_2017_{mm}__{sector}__{pollutant}.tiff"
                with rio.open(tiffile,
                              'w',
                              driver='GTiff',
                              height=num_lat,
                              width=num_lon,
                              count=1,
                              dtype=temp_ds.dtype,
                              crs='+proj=latlong',
                              transform=transform, ) as dst:
                    # Write the raw array; units are whatever the source
                    # NetCDF stores (presumably tons — TODO confirm).
                    dst.write(temp_ds.values, 1)
                print(f"Finish and output {tiffile}.")
        # Close the source dataset before moving to the next file.
        ds.close()
54 |
--------------------------------------------------------------------------------
/species/MEIC-SAPRC07_SAPRC07_speciate_power.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | SAPRC07_AACD,AACD,1,60.05,Mmol,mol/s
3 | SAPRC07_ACET,ACET,1,58.08,Mmol,mol/s
4 | SAPRC07_ACYE,ACYE,1,26.04,Mmol,mol/s
5 | SAPRC07_ALK1,ALK1,1,30.07,Mmol,mol/s
6 | SAPRC07_ALK2,ALK2,1,36.73,Mmol,mol/s
7 | SAPRC07_ALK3,ALK3,1,58.61,Mmol,mol/s
8 | SAPRC07_ALK4,ALK4,1,77.6,Mmol,mol/s
9 | SAPRC07_ALK5,ALK5,1,118.89,Mmol,mol/s
10 | SAPRC07_ARO1,ARO1,1,95.16,Mmol,mol/s
11 | SAPRC07_ARO2,ARO2MN,1,118.72,Mmol,mol/s
12 | SAPRC07_BACL,BACL,1,86.09,Mmol,mol/s
13 | SAPRC07_BALD,BALD,1,106.13,Mmol,mol/s
14 | SAPRC07_BENZ,BENZ,1,78.11,Mmol,mol/s
15 | SAPRC07_CH4,CH4,1,16.04,Mmol,mol/s
16 | SAPRC07_CCHO,CCHO,1,44.05,Mmol,mol/s
17 | SAPRC07_CRES,CRES,1,108.14,Mmol,mol/s
18 | SAPRC07_ETHE,ETHE,1,28.05,Mmol,mol/s
19 | SAPRC07_FACD,FACD,1,46.03,Mmol,mol/s
20 | SAPRC07_GLY,GLY,1,58.04,Mmol,mol/s
21 | SAPRC07_HCHO,HCHO,1,30.03,Mmol,mol/s
22 | SAPRC07_IPRD,IPRD,1,100.12,Mmol,mol/s
23 | SAPRC07_ISOP,ISOP,1,68.12,Mmol,mol/s
24 | SAPRC07_MACR,MACR,1,70.09,Mmol,mol/s
25 | SAPRC07_MEK,MEK,1,72.11,Mmol,mol/s
26 | SAPRC07_MEOH,MEOH,1,32.04,Mmol,mol/s
27 | SAPRC07_MGLY,MGLY,1,72.04,Mmol,mol/s
28 | SAPRC07_MVK,MVK,1,70.09,Mmol,mol/s
29 | SAPRC07_OLE1,OLE1,1,72.34,Mmol,mol/s
30 | SAPRC07_OLE2,OLE2,1,75.78,Mmol,mol/s
31 | SAPRC07_PACD,PACD,1,74.08,Mmol,mol/s
32 | SAPRC07_PRD2,PRD2,1,116.16,Mmol,mol/s
33 | SAPRC07_RCHO,RCHO,1,58.08,Mmol,mol/s
34 | SAPRC07_RNO3,RNO3,1,147.18,Mmol,mol/s
35 | SAPRC07_TERP,TERP,1,136.24,Mmol,mol/s
36 | PM25,PAL,5.60E-03,1,Mg,g/s
37 | PM25,PCA,0.033303,1,Mg,g/s
38 | PM25,PCL,0.009179,1,Mg,g/s
39 | PM25,PFE,6.22E-02,1,Mg,g/s
40 | PM25,PH2O,3.71E-03,1,Mg,g/s
41 | PM25,PK,0.002276,1,Mg,g/s
42 | PM25,PMG,4.07E-02,1,Mg,g/s
43 | PM25,PMN,5.00E-04,1,Mg,g/s
44 | PM25,PMOTHR,0.264368,1,Mg,g/s
45 | PM25,PNA,2.99E-03,1,Mg,g/s
46 | PM25,PNCOM,0.375107,1,Mg,g/s
47 | PM25,PNH4,0.041369,1,Mg,g/s
48 | PM25,PNO3,0.016703,1,Mg,g/s
49 | PM25,PSI,0.043676,1,Mg,g/s
50 | PM25,PSO4,0.09631,1,Mg,g/s
51 | PM25,PTI,1.97E-03,1,Mg,g/s
52 | SO2,SO2,1,64,Mg,mol/s
53 | SO2,SULF,0,98,Mg,mol/s
54 | NH3,NH3,1,17,Mg,mol/s
55 | CO,CO,1,28,Mg,mol/s
56 | PMC,PMC,1,1,Mg,g/s
57 | NOx,NO,0.9,30,Mg,mol/s
58 | NOx,NO2,0.092,46,Mg,mol/s
59 | NOx,HONO,0.008,47,Mg,mol/s
60 | OC,POC,1,1,Mg,g/s
61 |
--------------------------------------------------------------------------------
/species/MEIC-SAPRC07_SAPRC07_speciate_agriculture.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | SAPRC07_AACD,AACD,1,60.05,Mmol,mol/s
3 | SAPRC07_ACET,ACET,1,58.08,Mmol,mol/s
4 | SAPRC07_ACYE,ACYE,1,26.04,Mmol,mol/s
5 | SAPRC07_ALK1,ALK1,1,30.07,Mmol,mol/s
6 | SAPRC07_ALK2,ALK2,1,36.73,Mmol,mol/s
7 | SAPRC07_ALK3,ALK3,1,58.61,Mmol,mol/s
8 | SAPRC07_ALK4,ALK4,1,77.6,Mmol,mol/s
9 | SAPRC07_ALK5,ALK5,1,118.89,Mmol,mol/s
10 | SAPRC07_ARO1,ARO1,1,95.16,Mmol,mol/s
11 | SAPRC07_ARO2,ARO2MN,1,118.72,Mmol,mol/s
12 | SAPRC07_BACL,BACL,1,86.09,Mmol,mol/s
13 | SAPRC07_BALD,BALD,1,106.13,Mmol,mol/s
14 | SAPRC07_BENZ,BENZ,1,78.11,Mmol,mol/s
15 | SAPRC07_CH4,CH4,1,16.04,Mmol,mol/s
16 | SAPRC07_CCHO,CCHO,1,44.05,Mmol,mol/s
17 | SAPRC07_CRES,CRES,1,108.14,Mmol,mol/s
18 | SAPRC07_ETHE,ETHE,1,28.05,Mmol,mol/s
19 | SAPRC07_FACD,FACD,1,46.03,Mmol,mol/s
20 | SAPRC07_GLY,GLY,1,58.04,Mmol,mol/s
21 | SAPRC07_HCHO,HCHO,1,30.03,Mmol,mol/s
22 | SAPRC07_IPRD,IPRD,1,100.12,Mmol,mol/s
23 | SAPRC07_ISOP,ISOP,1,68.12,Mmol,mol/s
24 | SAPRC07_MACR,MACR,1,70.09,Mmol,mol/s
25 | SAPRC07_MEK,MEK,1,72.11,Mmol,mol/s
26 | SAPRC07_MEOH,MEOH,1,32.04,Mmol,mol/s
27 | SAPRC07_MGLY,MGLY,1,72.04,Mmol,mol/s
28 | SAPRC07_MVK,MVK,1,70.09,Mmol,mol/s
29 | SAPRC07_OLE1,OLE1,1,72.34,Mmol,mol/s
30 | SAPRC07_OLE2,OLE2,1,75.78,Mmol,mol/s
31 | SAPRC07_PACD,PACD,1,74.08,Mmol,mol/s
32 | SAPRC07_PRD2,PRD2,1,116.16,Mmol,mol/s
33 | SAPRC07_RCHO,RCHO,1,58.08,Mmol,mol/s
34 | SAPRC07_RNO3,RNO3,1,147.18,Mmol,mol/s
35 | SAPRC07_TERP,TERP,1,136.24,Mmol,mol/s
36 | PM25,PAL,5.60E-03,1,Mg,g/s
37 | PM25,PCA,0.033303,1,Mg,g/s
38 | PM25,PCL,0.009179,1,Mg,g/s
39 | PM25,PFE,6.22E-02,1,Mg,g/s
40 | PM25,PH2O,3.71E-03,1,Mg,g/s
41 | PM25,PK,0.002276,1,Mg,g/s
42 | PM25,PMG,4.07E-02,1,Mg,g/s
43 | PM25,PMN,5.00E-04,1,Mg,g/s
44 | PM25,PMOTHR,0.264368,1,Mg,g/s
45 | PM25,PNA,2.99E-03,1,Mg,g/s
46 | PM25,PNCOM,0.375107,1,Mg,g/s
47 | PM25,PNH4,0.041369,1,Mg,g/s
48 | PM25,PNO3,0.016703,1,Mg,g/s
49 | PM25,PSI,0.043676,1,Mg,g/s
50 | PM25,PSO4,0.09631,1,Mg,g/s
51 | PM25,PTI,1.97E-03,1,Mg,g/s
52 | SO2,SO2,1,64,Mg,mol/s
53 | SO2,SULF,0,98,Mg,mol/s
54 | NH3,NH3,1,17,Mg,mol/s
55 | CO,CO,1,28,Mg,mol/s
56 | PMC,PMC,1,1,Mg,g/s
57 | NOx,NO,0.9,30,Mg,mol/s
58 | NOx,NO2,0.092,46,Mg,mol/s
59 | NOx,HONO,0.008,47,Mg,mol/s
60 | OC,POC,1,1,Mg,g/s
61 |
--------------------------------------------------------------------------------
/species/MEIC-SAPRC07_SAPRC07_speciate_industry.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | SAPRC07_AACD,AACD,1,60.05,Mmol,mol/s
3 | SAPRC07_ACET,ACET,1,58.08,Mmol,mol/s
4 | SAPRC07_ACYE,ACYE,1,26.04,Mmol,mol/s
5 | SAPRC07_ALK1,ALK1,1,30.07,Mmol,mol/s
6 | SAPRC07_ALK2,ALK2,1,36.73,Mmol,mol/s
7 | SAPRC07_ALK3,ALK3,1,58.61,Mmol,mol/s
8 | SAPRC07_ALK4,ALK4,1,77.6,Mmol,mol/s
9 | SAPRC07_ALK5,ALK5,1,118.89,Mmol,mol/s
10 | SAPRC07_ARO1,ARO1,1,95.16,Mmol,mol/s
11 | SAPRC07_ARO2,ARO2MN,1,118.72,Mmol,mol/s
12 | SAPRC07_BACL,BACL,1,86.09,Mmol,mol/s
13 | SAPRC07_BALD,BALD,1,106.13,Mmol,mol/s
14 | SAPRC07_BENZ,BENZ,1,78.11,Mmol,mol/s
15 | SAPRC07_CH4,CH4,1,16.04,Mmol,mol/s
16 | SAPRC07_CCHO,CCHO,1,44.05,Mmol,mol/s
17 | SAPRC07_CRES,CRES,1,108.14,Mmol,mol/s
18 | SAPRC07_ETHE,ETHE,1,28.05,Mmol,mol/s
19 | SAPRC07_FACD,FACD,1,46.03,Mmol,mol/s
20 | SAPRC07_GLY,GLY,1,58.04,Mmol,mol/s
21 | SAPRC07_HCHO,HCHO,1,30.03,Mmol,mol/s
22 | SAPRC07_IPRD,IPRD,1,100.12,Mmol,mol/s
23 | SAPRC07_ISOP,ISOP,1,68.12,Mmol,mol/s
24 | SAPRC07_MACR,MACR,1,70.09,Mmol,mol/s
25 | SAPRC07_MEK,MEK,1,72.11,Mmol,mol/s
26 | SAPRC07_MEOH,MEOH,1,32.04,Mmol,mol/s
27 | SAPRC07_MGLY,MGLY,1,72.04,Mmol,mol/s
28 | SAPRC07_MVK,MVK,1,70.09,Mmol,mol/s
29 | SAPRC07_OLE1,OLE1,1,72.34,Mmol,mol/s
30 | SAPRC07_OLE2,OLE2,1,75.78,Mmol,mol/s
31 | SAPRC07_PACD,PACD,1,74.08,Mmol,mol/s
32 | SAPRC07_PRD2,PRD2,1,116.16,Mmol,mol/s
33 | SAPRC07_RCHO,RCHO,1,58.08,Mmol,mol/s
34 | SAPRC07_RNO3,RNO3,1,147.18,Mmol,mol/s
35 | SAPRC07_TERP,TERP,1,136.24,Mmol,mol/s
36 | PM25,PAL,5.60E-03,1,Mg,g/s
37 | PM25,PCA,0.033303,1,Mg,g/s
38 | PM25,PCL,0.009179,1,Mg,g/s
39 | PM25,PFE,6.22E-02,1,Mg,g/s
40 | PM25,PH2O,3.71E-03,1,Mg,g/s
41 | PM25,PK,0.002276,1,Mg,g/s
42 | PM25,PMG,4.07E-02,1,Mg,g/s
43 | PM25,PMN,5.00E-04,1,Mg,g/s
44 | PM25,PMOTHR,0.264368,1,Mg,g/s
45 | PM25,PNA,2.99E-03,1,Mg,g/s
46 | PM25,PNCOM,0.375107,1,Mg,g/s
47 | PM25,PNH4,0.041369,1,Mg,g/s
48 | PM25,PNO3,0.016703,1,Mg,g/s
49 | PM25,PSI,0.043676,1,Mg,g/s
50 | PM25,PSO4,0.09631,1,Mg,g/s
51 | PM25,PTI,1.97E-03,1,Mg,g/s
52 | SO2,SO2,1,64,Mg,mol/s
53 | SO2,SULF,0,98,Mg,mol/s
54 | NH3,NH3,1,17,Mg,mol/s
55 | CO,CO,1,28,Mg,mol/s
56 | PMC,PMC,1,1,Mg,g/s
57 | NOx,NO,0.9,30,Mg,mol/s
58 | NOx,NO2,0.092,46,Mg,mol/s
59 | NOx,HONO,0.008,47,Mg,mol/s
60 | OC,POC,1,1,Mg,g/s
61 |
--------------------------------------------------------------------------------
/species/MEIC-SAPRC07_SAPRC07_speciate_residential.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | SAPRC07_AACD,AACD,1,60.05,Mmol,mol/s
3 | SAPRC07_ACET,ACET,1,58.08,Mmol,mol/s
4 | SAPRC07_ACYE,ACYE,1,26.04,Mmol,mol/s
5 | SAPRC07_ALK1,ALK1,1,30.07,Mmol,mol/s
6 | SAPRC07_ALK2,ALK2,1,36.73,Mmol,mol/s
7 | SAPRC07_ALK3,ALK3,1,58.61,Mmol,mol/s
8 | SAPRC07_ALK4,ALK4,1,77.6,Mmol,mol/s
9 | SAPRC07_ALK5,ALK5,1,118.89,Mmol,mol/s
10 | SAPRC07_ARO1,ARO1,1,95.16,Mmol,mol/s
11 | SAPRC07_ARO2,ARO2MN,1,118.72,Mmol,mol/s
12 | SAPRC07_BACL,BACL,1,86.09,Mmol,mol/s
13 | SAPRC07_BALD,BALD,1,106.13,Mmol,mol/s
14 | SAPRC07_BENZ,BENZ,1,78.11,Mmol,mol/s
15 | SAPRC07_CH4,CH4,1,16.04,Mmol,mol/s
16 | SAPRC07_CCHO,CCHO,1,44.05,Mmol,mol/s
17 | SAPRC07_CRES,CRES,1,108.14,Mmol,mol/s
18 | SAPRC07_ETHE,ETHE,1,28.05,Mmol,mol/s
19 | SAPRC07_FACD,FACD,1,46.03,Mmol,mol/s
20 | SAPRC07_GLY,GLY,1,58.04,Mmol,mol/s
21 | SAPRC07_HCHO,HCHO,1,30.03,Mmol,mol/s
22 | SAPRC07_IPRD,IPRD,1,100.12,Mmol,mol/s
23 | SAPRC07_ISOP,ISOP,1,68.12,Mmol,mol/s
24 | SAPRC07_MACR,MACR,1,70.09,Mmol,mol/s
25 | SAPRC07_MEK,MEK,1,72.11,Mmol,mol/s
26 | SAPRC07_MEOH,MEOH,1,32.04,Mmol,mol/s
27 | SAPRC07_MGLY,MGLY,1,72.04,Mmol,mol/s
28 | SAPRC07_MVK,MVK,1,70.09,Mmol,mol/s
29 | SAPRC07_OLE1,OLE1,1,72.34,Mmol,mol/s
30 | SAPRC07_OLE2,OLE2,1,75.78,Mmol,mol/s
31 | SAPRC07_PACD,PACD,1,74.08,Mmol,mol/s
32 | SAPRC07_PRD2,PRD2,1,116.16,Mmol,mol/s
33 | SAPRC07_RCHO,RCHO,1,58.08,Mmol,mol/s
34 | SAPRC07_RNO3,RNO3,1,147.18,Mmol,mol/s
35 | SAPRC07_TERP,TERP,1,136.24,Mmol,mol/s
36 | PM25,PAL,5.60E-03,1,Mg,g/s
37 | PM25,PCA,0.033303,1,Mg,g/s
38 | PM25,PCL,0.009179,1,Mg,g/s
39 | PM25,PFE,6.22E-02,1,Mg,g/s
40 | PM25,PH2O,3.71E-03,1,Mg,g/s
41 | PM25,PK,0.002276,1,Mg,g/s
42 | PM25,PMG,4.07E-02,1,Mg,g/s
43 | PM25,PMN,5.00E-04,1,Mg,g/s
44 | PM25,PMOTHR,0.264368,1,Mg,g/s
45 | PM25,PNA,2.99E-03,1,Mg,g/s
46 | PM25,PNCOM,0.375107,1,Mg,g/s
47 | PM25,PNH4,0.041369,1,Mg,g/s
48 | PM25,PNO3,0.016703,1,Mg,g/s
49 | PM25,PSI,0.043676,1,Mg,g/s
50 | PM25,PSO4,0.09631,1,Mg,g/s
51 | PM25,PTI,1.97E-03,1,Mg,g/s
52 | SO2,SO2,1,64,Mg,mol/s
53 | SO2,SULF,0,98,Mg,mol/s
54 | NH3,NH3,1,17,Mg,mol/s
55 | CO,CO,1,28,Mg,mol/s
56 | PMC,PMC,1,1,Mg,g/s
57 | NOx,NO,0.9,30,Mg,mol/s
58 | NOx,NO2,0.092,46,Mg,mol/s
59 | NOx,HONO,0.008,47,Mg,mol/s
60 | OC,POC,1,1,Mg,g/s
61 |
--------------------------------------------------------------------------------
/species/MEIC-SAPRC07_SAPRC07_speciate_transportation.csv:
--------------------------------------------------------------------------------
1 | pollutant,emission_species,split_factor,divisor,inv_unit,emi_unit
2 | SAPRC07_AACD,AACD,1,60.05,Mmol,mol/s
3 | SAPRC07_ACET,ACET,1,58.08,Mmol,mol/s
4 | SAPRC07_ACYE,ACYE,1,26.04,Mmol,mol/s
5 | SAPRC07_ALK1,ALK1,1,30.07,Mmol,mol/s
6 | SAPRC07_ALK2,ALK2,1,36.73,Mmol,mol/s
7 | SAPRC07_ALK3,ALK3,1,58.61,Mmol,mol/s
8 | SAPRC07_ALK4,ALK4,1,77.6,Mmol,mol/s
9 | SAPRC07_ALK5,ALK5,1,118.89,Mmol,mol/s
10 | SAPRC07_ARO1,ARO1,1,95.16,Mmol,mol/s
11 | SAPRC07_ARO2,ARO2MN,1,118.72,Mmol,mol/s
12 | SAPRC07_BACL,BACL,1,86.09,Mmol,mol/s
13 | SAPRC07_BALD,BALD,1,106.13,Mmol,mol/s
14 | SAPRC07_BENZ,BENZ,1,78.11,Mmol,mol/s
15 | SAPRC07_CH4,CH4,1,16.04,Mmol,mol/s
16 | SAPRC07_CCHO,CCHO,1,44.05,Mmol,mol/s
17 | SAPRC07_CRES,CRES,1,108.14,Mmol,mol/s
18 | SAPRC07_ETHE,ETHE,1,28.05,Mmol,mol/s
19 | SAPRC07_FACD,FACD,1,46.03,Mmol,mol/s
20 | SAPRC07_GLY,GLY,1,58.04,Mmol,mol/s
21 | SAPRC07_HCHO,HCHO,1,30.03,Mmol,mol/s
22 | SAPRC07_IPRD,IPRD,1,100.12,Mmol,mol/s
23 | SAPRC07_ISOP,ISOP,1,68.12,Mmol,mol/s
24 | SAPRC07_MACR,MACR,1,70.09,Mmol,mol/s
25 | SAPRC07_MEK,MEK,1,72.11,Mmol,mol/s
26 | SAPRC07_MEOH,MEOH,1,32.04,Mmol,mol/s
27 | SAPRC07_MGLY,MGLY,1,72.04,Mmol,mol/s
28 | SAPRC07_MVK,MVK,1,70.09,Mmol,mol/s
29 | SAPRC07_OLE1,OLE1,1,72.34,Mmol,mol/s
30 | SAPRC07_OLE2,OLE2,1,75.78,Mmol,mol/s
31 | SAPRC07_PACD,PACD,1,74.08,Mmol,mol/s
32 | SAPRC07_PRD2,PRD2,1,116.16,Mmol,mol/s
33 | SAPRC07_RCHO,RCHO,1,58.08,Mmol,mol/s
34 | SAPRC07_RNO3,RNO3,1,147.18,Mmol,mol/s
35 | SAPRC07_TERP,TERP,1,136.24,Mmol,mol/s
36 | PM25,PAL,5.60E-03,1,Mg,g/s
37 | PM25,PCA,0.033303,1,Mg,g/s
38 | PM25,PCL,0.009179,1,Mg,g/s
39 | PM25,PFE,6.22E-02,1,Mg,g/s
40 | PM25,PH2O,3.71E-03,1,Mg,g/s
41 | PM25,PK,0.002276,1,Mg,g/s
42 | PM25,PMG,4.07E-02,1,Mg,g/s
43 | PM25,PMN,5.00E-04,1,Mg,g/s
44 | PM25,PMOTHR,0.264368,1,Mg,g/s
45 | PM25,PNA,2.99E-03,1,Mg,g/s
46 | PM25,PNCOM,0.375107,1,Mg,g/s
47 | PM25,PNH4,0.041369,1,Mg,g/s
48 | PM25,PNO3,0.016703,1,Mg,g/s
49 | PM25,PSI,0.043676,1,Mg,g/s
50 | PM25,PSO4,0.09631,1,Mg,g/s
51 | PM25,PTI,1.97E-03,1,Mg,g/s
52 | SO2,SO2,1,64,Mg,mol/s
53 | SO2,SULF,0,98,Mg,mol/s
54 | NH3,NH3,1,17,Mg,mol/s
55 | CO,CO,1,28,Mg,mol/s
56 | PMC,PMC,1,1,Mg,g/s
57 | NOx,NO,0.9,30,Mg,mol/s
58 | NOx,NO2,0.092,46,Mg,mol/s
59 | NOx,HONO,0.008,47,Mg,mol/s
60 | OC,POC,1,1,Mg,g/s
61 |
--------------------------------------------------------------------------------
/year2month.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2023/3/22 17:02
4 | # @Author :Haofan Wang
5 | # @Email :wanghf58@mail2.sysu.edu.cn
6 | import glob
7 | import os.path
8 |
9 | import pandas as pd
10 | from src import encode_title_tiff
11 | import rasterio
12 | import numpy as np
13 |
if __name__ == "__main__":
    """
    Split yearly emission GeoTIFFs into 12 monthly GeoTIFFs using the
    sector-specific monthly split factors in temporal/monthly.csv.

    Input file names are required to follow this format:
        "UKE_1970_00__AGS__nh3.tiff"
        LABEL_YEAR_00__SECTOR__POLLUTANT.tiff
    """
    print("This script is written by Haofan Wang.")
    # Input directory including emission raster.
    input_dir = r"D:\Emission-Inventory\EDGAR\GTiff4IAT_year_reclassification\1970"
    output_dir = r"D:\Emission-Inventory\EDGAR\GTiff4IAT_year_reclassification\monthly-1970"
    # BUG FIX: os.system(f"mkdir {dir}") is shell-dependent and breaks on
    # paths with spaces; create the directory portably instead.
    os.makedirs(output_dir, exist_ok=True)

    # Search the yearly rasters in the input directory.
    files = glob.glob(f"{input_dir}/*.tiff")
    # Monthly split factors: one row per month, one column per sector.
    factor = pd.read_csv("temporal/monthly.csv")
    for file in files:
        print(file)
        # Context manager so every source raster is closed (the original
        # leaked one open dataset per input file).
        with rasterio.open(file) as dataset:
            data = dataset.read(1)
            src_transform = dataset.transform
        # Decode LABEL_YEAR_00__SECTOR__POLLUTANT from the file name
        # (the yearly files carry month "00", which we ignore).
        file_info = encode_title_tiff(os.path.basename(file))
        label = file_info["label"]
        year = file_info["year"]
        sector = file_info["sector"]
        species = file_info["pollutant"]
        for month_i in range(1, 13):
            # Pick this sector's split factor for month_i.
            temp_factor = factor.iloc[np.where(factor.monthly.values == month_i)][sector].values[0]
            new_data = data * temp_factor
            mm = "%.2d" % month_i
            output_name = f"{output_dir}/{label}_{year}_{mm}__{sector}__{species}.tiff"
            with rasterio.open(
                    output_name,
                    'w',
                    driver='GTiff',
                    height=new_data.shape[0],
                    width=new_data.shape[1],
                    count=1,
                    dtype=new_data.dtype,
                    crs='+proj=latlong',
                    transform=src_transform,
            ) as dst:
                dst.write(new_data, 1)
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/calculate-pmc.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import rasterio as rio
3 | import rasterio.errors
4 |
if __name__ == "__main__":
    """
    Derive a PMC (coarse particulate) inventory from PM2.5 and PM10.

    CMAQ requires PMC as input rather than PM10, so for every
    (month, sector, year) combination we compute PMC = PM10 - PM2.5
    (clamped at zero) and write it next to the input rasters.
    """
    # Set the input directory.
    input_dir = r"D:\GitHub\MEIAT-CMAQ-data\MIX-2010"

    # File-name prefix of the inventory, e.g. "MIX_2010_01__power__PM25.tiff".
    prefix = "MIX"

    # Months to process (use ["00"] for annual files).
    months = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]
    # months = ["00"]

    # Set the sectors.
    sectors = ['residential', 'transportation', 'power', 'industry', 'agriculture']

    # Set the years.
    years = [2010]

    for month in months:
        for sector in sectors:
            for year in years:
                pm25_file = f"{input_dir}/{prefix}_{year}_{month}__{sector}__PM25.tiff"
                pm10_file = f"{input_dir}/{prefix}_{year}_{month}__{sector}__PM10.tiff"
                try:
                    # FIX: context managers close the rasters; the original
                    # left every opened dataset alive for the whole run.
                    with rio.open(pm25_file) as pm25_dataset, \
                            rio.open(pm10_file) as pm10_dataset:
                        height = pm25_dataset.height
                        width = pm25_dataset.width
                        transform = pm25_dataset.transform
                        pm25_data = pm25_dataset.read(1)
                        pm10_data = pm10_dataset.read(1)
                except rasterio.errors.RasterioIOError:
                    # A month/sector may lack one of the two rasters; skip it.
                    print(f"skip: {pm25_file} & {pm10_file}")
                    continue

                pmc_data = pm10_data - pm25_data

                # Negative residuals are inventory noise; clamp to zero.
                pmc_data = np.where(pmc_data < 0, 0, pmc_data)

                tiffile = f"{input_dir}/{prefix}_{year}_{month}__{sector}__PMC.tiff"

                with rio.open(tiffile,
                              'w',
                              driver='GTiff',
                              height=height,
                              width=width,
                              count=1,
                              dtype=pmc_data.dtype,
                              crs='+proj=latlong',
                              transform=transform, ) as dst:
                    dst.write(pmc_data, 1)
--------------------------------------------------------------------------------
/PREP/meic_2_GeoTiff.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2023/4/29 10:48
4 | # @Author :Jiaxin Qiu
5 |
6 | import glob
7 | import os.path
8 |
9 | import tqdm
10 | import numpy as np
11 | import rasterio
12 | from rasterio.transform import from_bounds
13 | from rasterio.crs import CRS
14 | import re
15 |
if __name__ == "__main__":
    print("This script is written by Jiaxin Qiu.")
    # ------------------------------------------
    # Directory of the original MEIC ASCII-grid (.asc) files and the
    # destination directory for the converted GeoTIFFs.
    input_dir = r"D:\GitHub\MEIAT-CMAQ-data\MEIC_SPARC07_2013"
    output_dir = r"D:\GitHub\MEIAT-CMAQ-data\G-MEIC_SPARC07_2013"
    # ------------------------------------------

    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)

    files = glob.glob(f"{input_dir}/*.asc")

    # The MEIC 0.25-degree grid covers 70-150E, 10-60N for every file, so
    # the grid geometry and GeoTIFF metadata are loop-invariant: build once.
    min_long, min_lat, max_long, max_lat = 70.0, 10.0, 150.0, 60.0
    x_resolution = 0.25
    y_resolution = 0.25
    width = int((max_long - min_long) / x_resolution)
    height = int((max_lat - min_lat) / y_resolution)
    transform = from_bounds(min_long, min_lat, max_long, max_lat, width, height)
    metadata = {
        "driver": "GTiff",
        "height": height,
        "width": width,
        "count": 1,
        "dtype": rasterio.float32,
        "crs": CRS.from_epsg(4326),
        "transform": transform,
    }

    # File names look like "<year>_<month>_<sector>_<pollutant>.asc".
    condition = r"(.*?)_(.*?)_(.*?)_(.*?).asc"
    # condition = r"(.*?)_(.*?)__(.*?)__(.*?).asc"  # For 2019 and 2020.

    for file in tqdm.tqdm(files):
        sub_name = os.path.basename(file)
        encode_name = re.findall(condition, sub_name)[0]
        year = r"%.4d" % int(encode_name[0])
        mm = r"%.2d" % int(encode_name[1])
        sector = encode_name[2]
        pollutant = encode_name[3].replace(".", "")
        output_name = f"{output_dir}/MEIC_{year}_{mm}__{sector}__{pollutant}.tiff"

        # BUG FIX: the original did "with open(file) as file:", shadowing
        # the loop variable with the file handle; use a distinct name.
        with open(file, 'r', encoding='utf-8') as asc:
            # Skip the 6-line ESRI ASCII-grid header, split the remaining
            # rows on whitespace.
            lines = [line.strip().split() for index, line in enumerate(asc) if index >= 6]

        raw = np.array(lines, dtype="float")
        # -9999 is the ASCII-grid NODATA marker; treat it as zero emission.
        # BUG FIX: cast to float32 so the array dtype matches the declared
        # band dtype (rasterio rejects writes with a mismatched dtype).
        z = np.where(raw == -9999.0, 0.0, raw).astype(rasterio.float32)

        # Write the single-band GeoTIFF.
        with rasterio.open(output_name, "w", **metadata) as dst:
            dst.write(z, 1)
76 |
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------
/PREP/mix_2_GeoTiff.py:
--------------------------------------------------------------------------------
1 | # !/usr/bin/env python
2 | # -*-coding:utf-8 -*-
3 | # @Time : 2023/01/14 16:01
4 | # @Author : Haofan Wang
5 | # @Version : python3.9
6 | # @Email : wanghf58@mail2.sysu.edu.cn
7 |
8 | import glob
9 | import os
10 | import re
11 |
12 | import xarray as xr
13 | import tqdm
14 |
15 | import rasterio as rio
16 | from rasterio.transform import Affine
17 |
18 |
if __name__ == '__main__':
    # Convert the MIX 0.25-degree NetCDF inventory into per-month,
    # per-sector GeoTIFFs named "MIX_<year>_<MM>__<sector>__<pollutant>.tiff".

    # The path of MIX emissions inventory.
    fdir = r"E:\MEIC\MIX\MIX_2010_Origin"
    output_dir = r"E:\MEIC\MIX\MIX_2010_Origin\MIX-2010"

    # Set the year of emission inventory.
    year = 2010

    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)

    # Search the files.
    files = glob.glob(f"{fdir}/*.nc")

    for file in tqdm.tqdm(files):
        ds = xr.open_dataset(file)
        lats = ds.coords["lat"].__array__()
        lons = ds.coords["lon"].__array__()
        lonmin, latmax, lonmax, latmin = lons.min(), lats.max(), lons.max(), lats.min()
        # Grid dimensions = number of coordinate values along each axis.
        num_lon = lons.shape[0]
        num_lat = lats.shape[0]
        res = 0.25
        # Anchor the transform on the lower-left cell edge.
        transform = Affine.translation(lonmin - res / 2, latmin - res / 2) * Affine.scale(res, res)

        # The pollutant name is encoded in the file name.
        file_name = os.path.basename(file)
        condition = fr"MICS_Asia_(.*?)_{year}_0.25x0.25.nc"
        pollutant = re.findall(condition, file_name)[0]

        # Variables are named "<pollutant>_<SECTOR>"; one band per month.
        # (The original also declared an unused `sectors` list here.)
        for var_name in list(ds.keys()):
            var = ds[var_name]
            # Idiom: index the coordinate instead of calling __getattr__.
            months = var["time"].values
            for i in range(12):
                month = "%.2d" % months[i]
                temp_var = var[i, ...].values
                sector = var_name.split("_")[-1]

                # MIX names road traffic "TRANSPORT"; the MEIAT file-name
                # convention expects "transportation".
                if sector == "TRANSPORT":
                    sector_label = "transportation"
                else:
                    sector_label = sector.lower()

                tiffile = f"MIX_{year}_{month}__{sector_label}__{pollutant}.tiff"
                with rio.open(f"{output_dir}/{tiffile}",
                              'w',
                              driver='GTiff',
                              height=num_lat,
                              width=num_lon,
                              count=1,
                              dtype=temp_var.dtype,
                              crs='+proj=latlong',
                              transform=transform, ) as dst:
                    dst.write(temp_var, 1)
                # print(f"Finish and output {output_dir}/{tiffile}.")
        # Close the source dataset before moving to the next file.
        ds.close()
75 |
--------------------------------------------------------------------------------
/UTIL/combine/combine.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # @Time :2023/5/18 14:52
4 | # @Author :Haofan Wang
5 | # @Email :wanghf58@mail2.sysu.edu.cn
6 |
7 | from src import *
8 |
if __name__ == "__main__":
    # --- I/O locations ------------------------------------------------------
    upper_raster_dir = r"D:\GitHub\MEIAT-CMAQ-data\MEIC-2017"
    bottom_raster_dir = r"D:\GitHub\MEIAT-CMAQ-data\MIX-2010"
    output_dir = r"D:\GitHub\MEIAT-CMAQ-data\MIX&MEIC"

    # --- pollutant lists ----------------------------------------------------
    # All three series share the same CB05 VOC species; only the coarse-PM
    # name differs (MEIC calls it "PMcoarse", MIX and the output use "PMC").
    cb05_species = [
        "CB05_ALD2", "CB05_ALDX", "CB05_CH4", "CB05_ETH", "CB05_ETHA",
        "CB05_ETOH", "CB05_FORM", "CB05_IOLE", "CB05_ISOP", "CB05_MEOH",
        "CB05_NVOL", "CB05_OLE", "CB05_PAR", "CB05_TERP", "CB05_TOL",
        "CB05_UNR", "CB05_XYL",
    ]
    upper_raster_pollutants = ["NH3", "NOx", "PMcoarse", "PM25", "SO2", "CO"] + cb05_species
    bottom_raster_pollutants = ["NH3", "NOx", "PMC", "PM25", "SO2", "CO"] + cb05_species
    output_pollutants = ["NH3", "NOx", "PMC", "PM25", "SO2", "CO"] + cb05_species

    # --- file-name labels ---------------------------------------------------
    upper_label = "MEIC"
    bottom_label = "MIX"
    output_label = "MEIC&MIX"

    # Any one existing file from each series, used as a geo-reference template.
    upper_raster_template = fr"{upper_raster_dir}\MEIC_2017_01__agriculture__NH3.tiff"
    bottom_raster_template = fr"{bottom_raster_dir}\MIX_2010_01__industry__CB05_TOL.tiff"

    # Resolutions (degrees) of the two GeoTIFF series.
    upper_resolution = 0.25
    bottom_resolution = 0.25

    # Sectors to mosaic.
    sectors = ["power", "transportation", "residential", "industry", "agriculture"]

    # Inventory years of input series and the year stamped on the output.
    upper_year = 2017
    bottom_year = 2010
    output_year = 2017
    # ------------------------------------------------------------------------

    # Mosaic the upper series onto the bottom series and time the run.
    start = time.time()
    main_mosaic(
        upper_raster_dir,
        bottom_raster_dir,
        output_dir,
        upper_raster_pollutants,
        bottom_raster_pollutants,
        output_pollutants,
        upper_label,
        bottom_label,
        output_label,
        upper_raster_template,
        bottom_raster_template,
        upper_resolution,
        bottom_resolution,
        sectors,
        upper_year,
        bottom_year,
        output_year,
    )
    elapsed_time = time.time() - start
    print(f"### Time consuming: {elapsed_time} s ###")
67 |
--------------------------------------------------------------------------------
/PREP/CEDSv2_2_GeoTiff.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time :2023/4/15 15:48
# @Author :Jiaxin Qiu

import glob
import os.path

import rasterio
import xarray as xr
import pyproj
import numpy as np

"""
Convert monthly CEDS NetCDF emission files into one GeoTIFF per
(year, month, sector, pollutant), named the way MEIAT-CMAQ expects:
LABEL_YYYY_MM__SECTOR__POLLUTANT.tiff

Data access: https://edgar.jrc.ec.europa.eu/dataset_ap61
"""

if __name__ == '__main__':
    print("This script is written by Jiaxin Qiu.")

    input_dir = r"I:\CEDSv2\2013"
    output_dir = r"I:\CEDSv2\2013\GTiff"

    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)

    files = glob.glob(f"{input_dir}/*.nc")
    for file in files:
        # WGS84 geographic (lat/lon) coordinate reference system.
        crs_proj = pyproj.CRS.from_string('EPSG:4326')

        # Open the NetCDF dataset.
        ds = xr.open_dataset(file)

        sub_name = os.path.basename(file)
        # Year is taken from the last "_"-separated token of the file name.
        yyyy = sub_name.split("_")[-1].split(".")[0]

        # Variable names are assumed to look like "<pollutant>_<sector>[...]".
        variables = list(ds.data_vars.keys())

        for variable in variables:
            data_array = ds[variable]
            sector = variable.split("_")[1]
            pollutant = variable.split("_")[0]

            # First axis is assumed to be the month dimension — TODO confirm.
            for month in range(1, data_array.shape[0]+1):
                month = r"%.2d" % month
                temp_data_array = data_array[int(month)-1, ...]

                # Spatial metadata from the coordinate arrays.
                height, width = temp_data_array.shape[0], temp_data_array.shape[1]
                lats, lons = temp_data_array.coords["lat"], temp_data_array.coords["lon"]
                south, north = lats.values.min(), lats.values.max()
                west, east = lons.values.min(), lons.values.max()

                # Clamp negative values to zero and flip rows so that row 0 is
                # the northernmost latitude (north-up raster for the transform).
                temp_data_array = np.flipud(np.where(temp_data_array < 0, 0, temp_data_array))

                # MEIAT-CMAQ: LABEL_YYYY_MM__SECTOR__POLLUTANT.tiff
                _sub_name = f"CEDS_{yyyy}_{month}__{sector}__{pollutant}.tiff"
                # BUG FIX: _sub_name already ends in ".tiff"; the original code
                # appended a second ".tiff", producing "*.tiff.tiff" files that
                # did not match the path printed below.
                with rasterio.open(f'{output_dir}/{_sub_name}', 'w', driver='GTiff',
                                   width=width, height=height, count=1,
                                   dtype=temp_data_array.dtype.name, nodata=0,
                                   transform=rasterio.transform.from_bounds(west, south, east, north, width, height),
                                   crs=crs_proj) as dst:
                    # Unit conversion before writing.
                    # NOTE(review): 10E-3 equals 1e-2, not 1e-3 — confirm the
                    # intended factor chain (2592000 s = 30 days; 50*50 looks
                    # like a cell-area term). Verify against source units.
                    dst.write(temp_data_array * 10E-3 * 50 * 50 * 1000000 * 2592000, 1)
                print(f'{output_dir}/{_sub_name}')
--------------------------------------------------------------------------------
/Doc/adopt_local_emission_to_meic_cn.md:
--------------------------------------------------------------------------------
1 | # 在MEIC清单中使用本地排放源
2 |
3 | --------------------
4 |
5 | **作者:王浩帆**
6 |
7 | --------------------
8 |
9 |
10 | 在开始此教程以前,我们已经有了一份广州市的本地工业排放清单(表)。
11 |
12 | | Pollutant Name | Total Amount (Mg) |
13 | | -------------- | ----------------- |
14 | | NOx | 1000 |
15 | | SO2 | 2000 |
16 |
17 |
18 | ## 步骤1:配置namelist.input
19 |
20 | 主要注意以下两点:
21 |
22 | * 修改`big_grid_file`为:`big_grid_file = "shapefile/CN-City.shp"`
23 | * 修改`create_source`为:`create_source = 0,`
24 |
25 | ```
26 | &global
27 | griddesc_file = "input/GRIDDESC.PRD274x181"
28 | griddesc_name = "PRD274x181"
29 | big_grid_file = "shapefile/CN-City.shp"
30 | geotiff_dir = "H:/MEIC/GeoTiff-2017"
31 | inventory_label = "MEIC"
32 | inventory_year = "2017"
33 | sectors = 'transportation', 'residential', 'power', 'agriculture', 'industry'
34 | allocator = 'line', 'landscan-global-2017_nodata.tif', 'power.tif', 'agriculture.tif', 'industry.tif',
35 | allocator_type = "line", "raster", "raster", "raster", "raster"
36 | inventory_mechanism = "MEIC-CB05"
37 | target_mechanism = "CB06"
38 | start_date = "2020-07-01"
39 | end_date = "2020-07-02"
40 | cores = 4
41 | /
42 |
43 | &line
44 | line_files = "motorway.shp", "primary.shp", "residential.shp", "secondary.shp"
45 | line_factors = 0.435798, 0.326848, 0.081712, 0.155642,
46 | /
47 |
48 | &control
49 | create_grid = 1,
50 | grid_info = 1,
51 | create_factor = 1,
52 | coarse_emission = 1,
53 | create_source = 0,
54 | /
55 | ```
56 |
57 | ## 步骤2:运行`coarse_emission_2_fine_emission.py`
58 |
59 | 在终端中输入命令:
60 |
61 | ```shell
62 | python coarse_emission_2_fine_emission.py
63 | ```
64 |
65 | ## 步骤3:打开`output\zoning_statistics\MEIC_2017_07__industry__*`
66 |
67 | 1. 打开`output\zoning_statistics\MEIC_2017_07__industry__NOx.csv`,将箭头所指位置改为本地清单中的NOx排放量(1000):
68 |
69 | 
70 |
71 |
72 | 同理打开`output\zoning_statistics\MEIC_2017_07__industry__SO2.csv`,修改广州市的SO2排放量。
73 |
74 | ## 步骤4:重新配置namelist.input
75 |
76 | * 只需要修改`&control`部分即可。
77 |
78 | ```
79 | &global
80 | griddesc_file = "input/GRIDDESC.PRD274x181"
81 | griddesc_name = "PRD274x181"
82 | big_grid_file = "shapefile/CN-City.shp"
83 | geotiff_dir = "H:/MEIC/GeoTiff-2017"
84 | inventory_label = "MEIC"
85 | inventory_year = "2017"
86 | sectors = 'transportation', 'residential', 'power', 'agriculture', 'industry'
87 | allocator = 'line', 'landscan-global-2017_nodata.tif', 'power.tif', 'agriculture.tif', 'industry.tif',
88 | allocator_type = "line", "raster", "raster", "raster", "raster"
89 | inventory_mechanism = "MEIC-CB05"
90 | target_mechanism = "CB06"
91 | start_date = "2020-07-01"
92 | end_date = "2020-07-02"
93 | cores = 4
94 | /
95 |
96 | &line
97 | line_files = "motorway.shp", "primary.shp", "residential.shp", "secondary.shp"
98 | line_factors = 0.435798, 0.326848, 0.081712, 0.155642,
99 | /
100 |
101 | &control
102 | create_grid = 0,
103 | grid_info = 0,
104 | create_factor = 0,
105 | coarse_emission = 0,
106 | create_source = 1,
107 | /
108 | ```
109 |
110 | ## 步骤5:运行`coarse_emission_2_fine_emission.py`
111 |
112 | 在终端中输入命令:
113 |
114 | ```shell
115 | python coarse_emission_2_fine_emission.py
116 | ```
117 |
118 | ## 步骤6:运行`Create-CMAQ-Emission-File.py`
119 |
120 | 在终端中输入命令:
121 |
122 | ```shell
123 | python Create-CMAQ-Emission-File.py
124 | ```
--------------------------------------------------------------------------------
/Doc/adopt_meic_for_prd_emission_file_cn.md:
--------------------------------------------------------------------------------
1 | # 使用2017年的MEIC清单制作珠三角的排放文件
2 |
3 | --------------------
4 |
5 | **作者:王浩帆**
6 |
7 | --------------------
8 |
9 | 本教程通过一个制作珠江三角洲模拟域(空间分辨率:3km)的排放清单来讲解以下两个程序的运行流程。
10 | 1. [coarse_emission_2_fine_emission.py](../coarse_emission_2_fine_emission.py)
11 | 2. [Create-CMAQ-Emission-File.py](../Create-CMAQ-Emission-File.py)
12 |
13 | 本教程使用的原始排放清单为2017年MEIC清单(逐月、分部门、CB05机制),输出的排放文件为2017年7月1日至2017年7月2日的分部门排放文件(CB06,AERO7机制)。
14 |
15 | ## 第一步:准备`GRIDDESC`文件
16 |
17 | `GRIDDESC`由CMAQ的前处理系统MCIP输出,主要需要注意以下三点:
18 | * 保证每一个`GRIDDESC`中有且仅有一个网格信息。
19 | * 保证GRIDDESC中的网格投影为Lambert投影(只要你的WRF设置为Lambert并正常运行MCIP,基本都不会有错)。
20 | * 如本教程所示的`GRIDDESC`文件中,`PRD274x181`是网格名称(`griddesc_name`: 此参数将会被`namelist.input`用到)。
21 |
22 | ```shell
23 | ' '
24 | 'LamCon_40N_97W'
25 | 2 25.000 40.000 110.000 110.000 28.500
26 | ' '
27 | 'PRD274x181'
28 | 'LamCon_40N_97W' 48000.000 -902500.000 3000.000 3000.000 271 178 1
29 | ' '
30 | ```
31 |
32 | ## 第二步:准备MEIC排放清单
33 |
34 | 下载地址:http://meicmodel.org/
35 |
36 | 由于MEIC存在版权保护,因此本程序中不提供数据,请用户自行下载。
37 |
38 | 下载到的MEIC通常会提供包括`*.asc`在内的一种或几种格式(如图),但本程序所有的网格化文件,都要求使用WGS1984投影的GeoTIFF文件。
39 |
40 |
41 |
42 | 因此需要通过[MEIC转GEOTIFF工具](../PREP/meic_2_GeoTiff.py)来对MEIC进行转换。在此程序中,输入的文件仅为`*.asc`格式。
43 |
44 | 通过以下方法配置好代码以后,在终端中运行`python ./PREP/meic_2_GeoTiff.py`即可。
45 | ```python
46 | # ------------------------------------------
47 | input_dir = "MEIC的asc格式文件所在目录路径"
48 | output_dir = "输出文件所在目录路径"
49 | # ------------------------------------------
50 | ```
51 |
52 | 运行成功以后,将会在输出路径下看到系列GeoTIFF格式文件。
53 |
54 | 
55 |
56 | ## 第三步:配置`namelist.input`文件。
57 |
58 | 准备好`GRIDDESC`文件和MEIC的GeoTIFF文件以后就可以开始配置`namelist.input`来准备运行程序了。
59 |
60 | 在配置`namelist.input`文件之前,需要强调一点:**整个程序对文件命名的要求都十分严格,请按要求给定GeoTIFF文件的命名**。
61 |
62 | 命名规则为: `{LABEL}_{YYYY}_{MM}__{SECTOR}__{POLLUTANT}.tiff`
63 | * LABEL:可以自行指定,但不能包含`_`。
64 | * YYYY:清单年份,4位字符串。
65 | * MM:清单月份,2位字符串。
66 | * SECTOR:部门名称,**注意:SECTOR的前后是两个`_`,而不是一个。**
67 | * POLLUTANT:物种名称。
68 |
69 | **注意:TIFF中的单位必须是`Mg/month`或者`Mmol/month`。**
70 |
71 | 以下是一个`namelist.input`的示例:
72 |
73 | 1. 网格和文件名信息。
74 | * griddesc_file:`GRIDDESC`文件所在路径。
75 | * griddesc_name:`GRIDDESC`文件中的网格名称。
76 | * big_grid_file:粗网格的shapefile文件,该文件中必须含有`NAME`字段,类型为字符串类型,WGS1984投影。
77 | * geotiff_dir:GeoTIFF文件所在目录路径。
78 | * inventory_label:原始清单文件名中的`${LABEL}`。
79 | * inventory_year:原始清单文件名中的`${YYYY}`。
80 |
81 | 2. 空间分配信息(以下参数保证一一对应)。
82 | * sectors: 分配部门,部门为GeoTIFF文件名中所提到的`{SECTOR}`。
83 | * allocator:分配因子,分配因子所在路径必须放在[allocator](../allocator)目录中,当对应的分配类型为`line`时,读取`&line`部分所设置的参数,此处分配因子任意填写。
84 | * allocator_type:分配类型,支持`raster`分配和`line`分配。
85 |
86 | *&line*部分
87 | * line_files:存放于[allocator](../allocator)目录中线数据名称。
88 | * line_factors:`line_files`中的线数据将按照此比例进行组合分配。
89 |
90 | 3. 物种分配谱信息。
91 | * inventory_mechanism:字符串,配合`target_mechanism`使用,用于寻找对应的物种分配文件,不能含有`_`。
92 | * target_mechanism:字符串,配合`inventory_mechanism`使用,用于寻找对应的物种分配文件,不能含有`_`。
93 | **注意:物种分配谱文件存放于[species](../species)中,且物种分配文件的命名规则为:{inventory_mechanism}_{target_mechanism}_speciate_{SECTOR}**
94 |
95 | 4. 排放文件日期
96 | * start_date:排放文件的开始日期,格式:YYYY-MM-DD
97 | * end_date:排放文件的结束日期,格式:YYYY-MM-DD
98 |
99 | 5. 并行计算核心数
100 | * cores:根据自己的电脑配置设置。
101 |
102 | 6. `&control`开关控制部分
103 | * create_grid:控制生成模拟域网格shapefile。
104 | * grid_info:控制生成网格信息表格。
105 | * create_factor:控制生成排放因子文件。
106 | * coarse_emission:控制粗网格的区域统计。
107 | * create_source:控制生成source文件。
108 |
109 | ```Fortran
110 | &global
111 | griddesc_file = "input/GRIDDESC.PRD274x181"
112 | griddesc_name = "PRD274x181"
113 | big_grid_file = "shapefile/MEIC-0P25.shp"
114 | geotiff_dir = "H:\MEIC\GeoTiff-2017"
115 | inventory_label = "MEIC"
116 | inventory_year = "2017"
117 | sectors = 'transportation', 'residential', 'power', 'agriculture', 'industry'
118 | allocator = 'line', 'population-nodata-0.tif', 'ave-nodata-0.tif', 'ave-nodata-0.tif', 'ave-nodata-0.tif',
119 | allocator_type = "line", "raster", "raster", "raster", "raster"
120 | inventory_mechanism = "MEIC-CB05"
121 | target_mechanism = "CB06"
122 | start_date = "2020-07-01"
123 | end_date = "2020-07-02"
124 | cores = 4
125 | /
126 |
127 | &line
128 | line_files = "motorway.shp", "primary.shp", "residential.shp", "secondary.shp"
129 | line_factors = 0.435798, 0.326848, 0.081712, 0.155642,
130 | /
131 |
132 | &control
133 | create_grid = 1,
134 | grid_info = 1,
135 | create_factor = 1,
136 | coarse_emission = 1,
137 | create_source = 1,
138 | /
139 | ```
140 | 配置好`namelist.input`以后进入第四步。
141 |
142 | ## 第四步:完成空间分配。
143 |
144 | 将[allocator](../allocator)目录下的压缩文件解压。
145 |
146 | 该过程通过[coarse_emission_2_fine_emission.py](../coarse_emission_2_fine_emission.py)实现,输入以下命令执行脚本。
147 |
148 | ```shell
149 | python .\coarse_emission_2_fine_emission.py
150 | ```
151 |
152 | ## 第五步:完成时间分配和物种分配,并生成直接输入CMAQ的排放文件。
153 |
154 | 该过程通过[Create-CMAQ-Emission-File.py](../Create-CMAQ-Emission-File.py)实现,输入以下命令执行脚本。
155 |
156 | ```shell
157 | python .\Create-CMAQ-Emission-File.py
158 | ```
159 |
160 | 虽然在[output](../output)中成功生成了排放文件,但是在日志中发现了如下警告信息。
161 |
162 | ```shell
163 | Warning: Do not have the pollutant named PMC.
164 | ```
165 |
166 | 这是因为在对应部门的物种文件中第一列的`PMC`无法找到(如图)
167 |
168 | 
169 |
170 | 根本原因是,GeoTIFF所在目录下的`PMC`物种的命名为`PMcoarse`。
171 | 
172 |
173 | **解决方案**
174 | 1. 将对应部门物种文件中第一列的`PMC`改为`PMcoarse`,重新运行第五步。
175 | 2. 使用[rename_original_inventory_(pollutant).py](../UTIL/rename_original_inventory/rename_original_inventory_(pollutant).py)将`PMcoarse`改为`PMC`即可。
176 | 修改`namelist.input`中的`geotiff_dir`为新的GeoTIFF目录,重新运行第四步和第五步即可。
177 |
178 | [rename_original_inventory_(pollutant).py](../UTIL/rename_original_inventory/rename_original_inventory_(pollutant).py)使用方法见[原始排放清单重命名使用手册](../UTIL/rename_original_inventory/README.md)。
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
--------------------------------------------------------------------------------
/src.py:
--------------------------------------------------------------------------------
1 | # import datetime
2 | # import glob
3 | # import multiprocessing
4 | import os
5 | # import os.path
6 | # import re
7 | import time
8 | # import shutil
9 | #
10 | # import PseudoNetCDF as pnc
11 | # import arcgisscripting
12 | # import arcpy
13 | # import f90nml
14 | # import geopandas as gpd
15 | # import numpy as np
16 | # import pandas as pd
17 | # import pyioapi
18 | # import pyproj
19 | # import rioxarray as rxr
20 | # import tqdm
21 | # from arcpy.sa import *
22 | # from shapely.geometry import Polygon, Point
23 | # from shapely.ops import cascaded_union
24 | # from shapely.prepared import prep
25 | #
26 | # import shapefile as shp
27 |
28 | # import arcgisscripting
29 | # import arcpy
30 | # from arcpy.sa import *
31 | # # Use half of the cores on the machine
32 | # arcpy.env.parallelProcessingFactor = "50%"
33 | # arcpy.env.overwriteOutput = True
34 | import pandas as pd
35 |
36 |
def user_control():
    """Return True while the build is within its validity window.

    The tool is treated as expired after 2500-06-30 23:59 (local time);
    callers print an "expired" banner and bail out when this returns False.
    """
    import datetime
    expiry = datetime.datetime(2500, 6, 30, 23, 59)
    # Still valid as long as "now" has not passed the expiry timestamp.
    return datetime.datetime.now() <= expiry
47 |
48 |
def time_format():
    """Return a timestamp prefix for log lines, e.g. '2023-05-18 14:52:00.123|> '."""
    from datetime import datetime
    return f"{datetime.now()}|> "
52 |
53 |
def main_mosaic(
        upper_raster_dir,
        bottom_raster_dir,
        output_dir,
        upper_raster_pollutants,
        bottom_raster_pollutants,
        output_pollutants,
        upper_label,
        bottom_label,
        output_label,
        upper_raster_template,
        bottom_raster_template,
        upper_resolution,
        bottom_resolution,
        sectors,
        upper_year,
        bottom_year,
        output_year,
):
    """Mosaic an "upper" (priority) inventory on top of a "bottom" (background)
    inventory for every (month, sector, pollutant) combination.

    For each triple, the bottom raster is rescaled by the resolution-area
    ratio, copied to the output name, and the upper raster is mosaicked over
    it with arcpy (mosaic_type="LAST": the upper raster wins where present).
    Missing input rasters are replaced by all-zero rasters cloned from the
    given template files.  Outputs are written to
    {output_dir}/{output_label}_{output_year}_{MM}__{sector}__{pollutant}.tiff

    Requires arcpy (ArcGIS) and rasterio at call time.

    NOTE(review): the bottom rasters are rewritten in place (divided by
    `factor`); re-running over the same inputs compounds the scaling.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------

    import rasterio
    import arcpy
    import shutil

    arcpy.env.parallelProcessingFactor = "50%"
    arcpy.env.overwriteOutput = True

    # Cell-area ratio between the two grids; bottom per-cell totals are
    # divided by this so mass is conserved when resolutions differ.
    factor = bottom_resolution ** 2 / upper_resolution ** 2

    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)

    for month in range(1, 13):
        month = r"%.2d" % month
        for sector in sectors:
            for upper_raster_pollutant, bottom_raster_pollutant, output_pollutant in zip(upper_raster_pollutants,
                                                                                         bottom_raster_pollutants,
                                                                                         output_pollutants):
                upper_raster_file = f"{upper_raster_dir}/{upper_label}_{upper_year}_{month}__{sector}__{upper_raster_pollutant}.tiff"
                bottom_raster_file = f"{bottom_raster_dir}/{bottom_label}_{bottom_year}_{month}__{sector}__{bottom_raster_pollutant}.tiff"

                # A missing input is replaced by an all-zero raster cloned
                # from the corresponding template's grid metadata.
                if os.path.exists(upper_raster_file) is False:
                    with rasterio.open(upper_raster_template) as src:
                        raster_data = src.read(1)
                        output_meta = src.meta.copy()
                    values = raster_data * 0
                    with rasterio.open(upper_raster_file, "w", **output_meta) as dest:
                        dest.write(values, 1)
                if os.path.exists(bottom_raster_file) is False:
                    with rasterio.open(bottom_raster_template) as src:
                        raster_data = src.read(1)
                        output_meta = src.meta.copy()
                    values = raster_data * 0
                    with rasterio.open(bottom_raster_file, "w", **output_meta) as dest:
                        dest.write(values, 1)

                # Rescale the actual bottom raster by the cell-area ratio.
                # BUG FIX: the original opened bottom_raster_template here,
                # which overwrote every bottom raster with the single
                # template's data instead of rescaling its own values.
                with rasterio.open(bottom_raster_file) as src:
                    raster_data = src.read(1)
                    output_meta = src.meta.copy()
                with rasterio.open(bottom_raster_file, "w", **output_meta) as dest:
                    dest.write(raster_data / factor, 1)

                output_name = f"{output_dir}/{output_label}_{output_year}_{month}__{sector}__{output_pollutant}.tif"
                if os.path.exists(output_name):
                    os.remove(output_name)
                shutil.copy(bottom_raster_file, output_name)

                # Mosaic the upper raster onto the copied bottom raster;
                # "LAST" keeps the last (upper) raster where rasters overlap.
                arcpy.Mosaic_management([output_name, upper_raster_file],
                                        output_name,
                                        mosaic_type="LAST",
                                        nodata_value=0, )
                # Remove ArcGIS side-car files left next to the output.
                if os.path.exists(output_name[0:-4] + ".tfw"):
                    os.remove(output_name[0:-4] + ".tfw")
                if os.path.exists(output_name + ".aux.xml"):
                    os.remove(output_name + ".aux.xml")
                if os.path.exists(output_name + ".xml"):
                    os.remove(output_name + ".xml")
                if os.path.exists(output_name + ".ovr"):
                    os.remove(output_name + ".ovr")
                # Rename *.tif -> *.tiff to match the project's convention.
                shutil.move(output_name, output_name + "f")
                print(f"Finish and output {output_name}f.")
157 |
158 |
def main_vertical_allocation(files, sectors):
    """Run vertical_allocation() for every (file, sector) pair in parallel.

    The worker pool size comes from the `cores` entry of namelist.input and
    is validated against the machine's CPU count. Output goes to
    ./output/vertical under the current working directory.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------
    import f90nml
    import multiprocessing
    # --------------------------------------------------------------------------------------------------------
    work_dir = os.getcwd()
    output_dir = fr"{work_dir}/output/vertical"
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # Requested core count from the namelist.
    namelist = f90nml.read("namelist.input")
    cores = namelist["global"]["cores"]

    # Guard against requesting more cores than the machine has.
    available_cores = multiprocessing.cpu_count()
    print("The total core: ", available_cores)
    print("Your set is: ", cores)
    if cores > available_cores:
        print("Please ensure that the number of cores used "
              "does not exceed the maximum number of cores on your computer.")
        exit()

    # One task per (file, sector) combination.
    tasks = [(emission_file, output_dir, sector)
             for emission_file in files
             for sector in sectors]

    # Fan the tasks out over the pool and wait for completion.
    pool = multiprocessing.Pool(cores)
    pool.starmap(vertical_allocation, tasks)
    pool.close()
    pool.join()

    print("Done")
213 |
# def vertical_allocation(eipath, output_dir):
def vertical_allocation(eipath, output_dir, sector):  # Modify by Haofan Wang.
    """Expand a 2-D IOAPI emission file into 3-D layers using profile.csv.

    Parameters
    ----------
    eipath : str
        Path to the input 2-D IOAPI emission file.
    output_dir : str
        Directory the layered file is written to (same base name as eipath).
    sector : str
        Column of profile.csv holding this sector's per-layer fractions.

    profile.csv must also contain a `vglvltop` column (layer top levels);
    its row count defines the number of output layers.
    """
    import PseudoNetCDF as pnc
    # import pandas as pd
    # import numpy as np
    # --------------------------------------------------------------------------------------------------------
    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)
    e2df = pnc.pncopen(eipath, format='ioapi')
    # VAR-LIST is a fixed-width concatenation of 16-character variable names.
    varliststr = getattr(e2df, 'VAR-LIST')
    varlist = [varliststr[i:i + 16].strip() for i in range(0, len(varliststr), 16)]
    profpath = 'profile.csv'
    profile = pd.read_csv(profpath)
    nz = profile.shape[0]
    # outpath = 'example_3d_emissions.nc'
    outpath = f"{output_dir}/{os.path.basename(eipath)}"
    verbose = 0
    # Replicate the single surface layer nz times, then set layer metadata.
    e3df = e2df.slice(LAY=[0] * nz)
    e3df.VGLVLS = profile.vglvltop.values.astype('f')
    e3df.NLAYS = nz
    e3df.VGTOP = e2df.VGTOP
    for key, var3d in e3df.variables.items():
        # Only scale real emission variables; skip coordinate/time variables.
        if key not in varlist:
            continue
        var2d = e2df.variables[key]
        vals2d = var2d[:]
        # Broadcast the sector's layer fractions over (time, lay, row, col).
        # vals3d = vals2d * profile['fraction'].values[None, :, None, None]
        vals3d = vals2d * profile[sector].values[None, :, None, None]  # Modify by Haofan Wang.
        var3d[:] = vals3d

    # Overwrite any previous output, then save as NetCDF3 classic.
    if os.path.exists(outpath):
        os.remove(outpath)
    outf = e3df.save(outpath, verbose=verbose, complevel=1, format="NETCDF3_CLASSIC")
    outf.close()
248 |
249 |
def main_coarse2fine():
    """Drive the coarse-to-fine spatial allocation pipeline.

    Reads namelist.input and runs up to five stages, each gated by a flag in
    the &control section:
      create_grid     -> build the fine model-grid shapefile/CSV
      grid_info       -> build grid_info.csv linking fine and coarse grids
      create_factor   -> build per-sector allocation-factor files (EF_*.csv)
      coarse_emission -> zonal statistics of the coarse GeoTIFF inventory
      create_source   -> build the final per-month source files

    Relies on helpers defined elsewhere in this module (draw_modle_grid,
    create_gridinfo, road_allocation, road_deal, raster_allocation,
    zoning_statistics, create_source). All outputs go to ./output under the
    current working directory.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------
    import f90nml
    import PseudoNetCDF as pnc
    import geopandas as gpd
    # import pandas as pd
    import numpy as np
    # --------------------------------------------------------------------------------------------------------
    path = os.getcwd()
    # --------------------------------------------------------------------------------------------------------
    output_dir = f"{path}/output"
    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)

    # --------------------------------------------------------------------------------------------------------
    # Read grid definition settings from the namelist.
    example = f90nml.read("namelist.input")
    griddesc_file = example["global"]["griddesc_file"]
    griddesc_name = example["global"]["griddesc_name"]

    # --------------------------------------------------------------------------------------------------------
    # Stage 1: fine model-grid shapefile (plus per-cell lon/lat and a CSV copy).
    control_create_grid = example["control"]["create_grid"]
    print(f"--------------Fine grid shapefile|> {output_dir}/shapefile-grid.shp--------------")
    if int(control_create_grid) == 1:
        print(
            f'{time_format()}The control of create grid is {control_create_grid} and processor start creating fine grid.')
        draw_modle_grid(griddesc_file)
        # Reopen the GRIDDESC to convert each cell's (col, row) to lon/lat.
        gf = pnc.pncopen(griddesc_file, GDNAM=griddesc_name, format="griddesc", SDATE=2023001, TSTEP=10000,
                         withcf=False)
        gpdf = gpd.read_file(f"{output_dir}/shapefile-grid.shp")
        lon, lat = gf.ij2ll(gpdf["colnum"].values, gpdf["rownum"].values)
        gpdf["LON"] = lon
        gpdf["LAT"] = lat
        gpdf.to_file(f'{output_dir}/shapefile-grid.shp', driver='ESRI Shapefile', encoding='utf-8')
        gpdf[["ID", "colnum", "rownum", "LON", "LAT"]].to_csv(f"{output_dir}/shapefile-grid.csv")
        print(f'{time_format()}Finish creating fine grid shapefile.')
    else:
        # Stage skipped: a previously generated grid shapefile must exist.
        print(f"{time_format()}The control of create grid is {control_create_grid}.")
        if os.path.exists(f"{output_dir}/shapefile-grid.shp"):
            print(f"{time_format()}There is fine grid shapefile and processor will continue.")
        else:
            print(f"{time_format()}There is not fine grid shapefile and processor will exit.")
            exit()

    # --------------------------------------------------------------------------------------------------------
    # Stage 2: grid information table linking the fine grid to the coarse grid.
    small_grid = f"{output_dir}/shapefile-grid.shp"
    big_grid = example["global"]["big_grid_file"]
    control_grid_info = example["control"]["grid_info"]
    grid_info_out_path = output_dir
    print(f"-----------------Grid information|> {grid_info_out_path}/grid_info.csv--------------")
    if int(control_grid_info) == 1:
        print(
            f'{time_format()}The control of grid information is {control_grid_info} and processor start for grid information.')
        if os.path.exists(grid_info_out_path) is False:
            os.makedirs(grid_info_out_path)
        max_id = pd.read_csv(f"{output_dir}/shapefile-grid.csv")["ID"].values.max()
        create_gridinfo(small_grid, big_grid, grid_info_out_path, max_id)
        grid_info_path = f'{grid_info_out_path}/grid_info.csv'
        print(f'{time_format()}Finish creating grid information.')
    else:
        print(f"{time_format()}The control of grid information is {control_grid_info}.")
        if os.path.exists(f'{grid_info_out_path}/grid_info.csv'):
            print(f"{time_format()}There is grid information and processor will continue.")
        else:
            print(f"{time_format()}There is not grid information and processor will exit.")
            exit()

    # --------------------------------------------------------------------------------------------------------
    # Stage 3: per-sector allocation factors (EF_<sector>.csv).
    ef_out_path = f'{output_dir}/factor'
    grid_info_path = f'{grid_info_out_path}/grid_info.csv'
    if os.path.exists(ef_out_path) is False:
        os.mkdir(ef_out_path)
    control_create_factor = example["control"]["create_factor"]
    print(f"----------------Allocation factor|> {ef_out_path}--------------")
    # Read the allocator types, allocator paths and the sectors to allocate.
    # f90nml returns a scalar (not a list) when only one value is given, so
    # each setting is normalized to a list first.
    allocator_types = example["global"]["allocator_type"]
    if type(allocator_types) != type(["list"]):
        allocator_types = [allocator_types]
    allocators = example["global"]["allocator"]
    if type(allocators) != type(["list"]):
        allocators = [allocators]
    sectors = example["global"]["sectors"]
    if type(sectors) != type(["list"]):
        sectors = [sectors]

    if int(control_create_factor) == 1:
        print(
            f'{time_format()}The control of allocation factor is {control_create_factor} and processor start for allocation factor.')
        for allocator_type, allocator, sector in zip(allocator_types, allocators, sectors):
            # Skip sectors whose factor file already exists.
            if os.path.exists(f'{ef_out_path}/EF_{sector}.csv'):
                print(f"{time_format()}There is {ef_out_path}/EF_{sector}.csv and processor will skip this loop.")
                continue
            print(f"{time_format()}There is the process for {sector} and the allocator type is {allocator_type}.")
            if allocator_type == "line":
                # Line-based allocation: combine several road shapefiles
                # using the weights from the &line namelist section.
                road_dir = f"{path}/allocator"
                line_files = example["line"]["line_files"]
                line_factors = example["line"]["line_factors"]
                print(f"{time_format()}Allocator | {line_files}.")
                print(f"{time_format()}Allocator factor | {line_factors}.")
                road_allocation(line_files, road_dir, small_grid, big_grid, grid_info_path, ef_out_path)
                output_name = f'{ef_out_path}/EF_{sector}.csv'
                ef_files = [f"{output_dir}/factor/EF_{line_file}.csv" for line_file in line_files]
                road_deal(ef_files, line_factors, output_name)
            elif allocator_type == "raster":
                # Raster-based allocation from a surrogate GeoTIFF.
                result_csv_path_name = f'EF_{sector}'
                raster_file = f"{path}/allocator/{allocator}"
                print(f"{time_format()}Allocator | {raster_file}.")
                raster_allocation(raster_file, small_grid, big_grid, grid_info_path, ef_out_path, result_csv_path_name)
            else:
                print(f"{time_format()}Allocator type is not support.| {allocator_type}.")
    else:
        print(f"{time_format()}The control of allocation is {control_create_factor}.")
        for sector in sectors:
            if os.path.exists(f'{ef_out_path}/EF_{sector}.csv'):
                print(f"{time_format()}There is allocation factor and processor will continue.")
            else:
                print(f"{time_format()}There is not allocation factor and processor will exit.")
                exit()
    # --------------------------------------------------------------------------------------------------------
    # Stage 4: zonal statistics of the coarse inventory over the months
    # spanned by [start_date, end_date].
    control_coarse_emis = example["control"]["coarse_emission"]
    print(f"----------------Coarse Emission|> f'{output_dir}/zoning_statistics'--------------")
    if int(control_coarse_emis) == 1:
        print(
            f'{time_format()}The control of coarse emission is {control_coarse_emis} and processor start for coarse emission.')
        meic_zoning_out = f'{output_dir}/zoning_statistics'
        if os.path.exists(meic_zoning_out) is False:
            os.mkdir(meic_zoning_out)
        geotiff_dir = example["global"]["geotiff_dir"]
        start_date = pd.to_datetime(example["global"]["start_date"])
        end_date = pd.to_datetime(example["global"]["end_date"])
        date_list = pd.date_range(start_date, end_date)
        # Unique two-digit month strings covered by the date range.
        mms = np.unique(np.array([temp_date.strftime("%m") for temp_date in date_list]))
        for mm in mms:
            zoning_statistics(small_grid, big_grid, geotiff_dir, meic_zoning_out, mm)
    else:
        print(f"{time_format()}The control of coarse emission is {control_coarse_emis}.")

    # --------------------------------------------------------------------------------------------------------
    # Stage 5: build the per-month source files from factors + zonal stats.
    meic_zoning_out = f'{output_dir}/zoning_statistics'
    control_create_source = example["control"]["create_source"]
    source_out_dir = f'{output_dir}/source'
    if os.path.exists(source_out_dir) is False:
        os.mkdir(source_out_dir)
    print(f"----------------Coarse Emission|> f'{output_dir}/source'--------------")
    if int(control_create_source) == 1:
        start_date = pd.to_datetime(example["global"]["start_date"])
        end_date = pd.to_datetime(example["global"]["end_date"])
        date_list = pd.date_range(start_date, end_date)
        mms = np.unique(np.array([temp_date.strftime("%m") for temp_date in date_list]))
        for mm in mms:
            print(f"Processing for month {mm}.")
            inventory_label = example["global"]["inventory_label"]
            inventory_year = example["global"]["inventory_year"]
            create_source(inventory_label, inventory_year, mm, sectors, ef_out_path, meic_zoning_out, source_out_dir)
        print(
            f'{time_format()}The control of create source is {control_create_source} and processor start for coarse emission.')
    else:
        print(
            f'{time_format()}The control of create source is {control_create_source}.')

    # --------------------------------------------------------------------------------------------------------
    print('# ------------------------------------' + 'End' + '------------------------------------ #')
    current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print(f"The program end time :{current_time}")
    print('# ------------------------------------' + '---' + '------------------------------------ #')
428 |
429 |
def main_create_CMAQ_mask(gadmpath, field, output_name):
    """Create a CMAQ IOAPI mask file from a shapefile attribute field.

    One output variable is written per unique value of *field*; each grid
    cell holds the fraction (0-1) of its area covered by the polygons
    carrying that attribute value.

    :param gadmpath: path of the input shapefile.
    :param field: attribute field whose unique values define the regions.
    :param output_name: path of the output IOAPI (NetCDF) mask file.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------
    import f90nml
    import shapefile as shp
    import numpy as np
    import PseudoNetCDF as pnc
    from shapely.geometry import Polygon
    # FIX: cascaded_union is deprecated and removed in Shapely 2.0;
    # unary_union is the long-standing drop-in replacement.
    from shapely.ops import unary_union
    from shapely.prepared import prep
    # --------------------------------------------------------------------------------------------------------

    # Read the grid definition from the namelist.
    example = f90nml.read("namelist.input")
    grid_file = example["global"]["griddesc_file"]
    grid_name = example["global"]["griddesc_name"]

    # Open the shapefile.
    shpf = shp.Reader(gadmpath)

    # Find where the field is (skip the leading DeletionFlag field).
    fldnames = [n for n, t, l, d in shpf.fields][1:]
    try:
        pos = np.where(np.array(fldnames) == field)[0][0]
    except IndexError:
        exit("ERROR: No such field in the shapefile.")

    # PERF: group record indices by attribute value in ONE pass over the
    # records (the original rescanned every record once per region).
    records_by_value = {}
    for ri, rec in enumerate(shpf.iterRecords()):
        records_by_value.setdefault(rec[pos], []).append(ri)

    # Open the GRIDDESC and start from an all-zero template variable.
    gf = pnc.pncopen(grid_file, format='griddesc', GDNAM=grid_name)
    outf = gf.eval("DUMMY = DUMMY[:] * 0")

    # Cell-corner coordinates are loop-invariant: compute them once,
    # not once per region as before.
    Ipl1, Jpl1 = np.meshgrid(np.arange(gf.NCOLS + 1), np.arange(gf.NROWS + 1))
    LONp1, LATp1 = gf.ij2ll(Ipl1, Jpl1)
    Xp1, Yp1 = gf.ll2xy(LONp1, LATp1)
    # Shift by half a cell so LOND/LATD hold cell-corner coordinates.
    LOND, LATD = gf.xy2ll(Xp1 - gf.XCELL / 2, Yp1 - gf.YCELL / 2)
    gf.SDATE = 2019001

    # Iterate regions in sorted order (matches the original np.unique order).
    for city_name in sorted(records_by_value):
        # Merge all sub-shapes that share this attribute value.
        shapes = [shpf.shape(rn) for rn in records_by_value[city_name]]
        polygons = [Polygon(s.points).buffer(0) for s in shapes]
        uberpoly = unary_union(polygons)
        # Prepared geometry makes the repeated intersects() tests cheap.
        puberpoly = prep(uberpoly)

        evar = outf.createVariable(city_name, 'f', ('TSTEP', 'LAY', 'ROW', 'COL'))
        evar.units = 'fraction'
        evar.long_name = city_name
        evar.var_desc = '0 means not mine, 1 means all mine, inbetween is partial'

        # Fractional area overlap of each grid cell with the merged polygon.
        # (The original also carried an unreachable point-containment branch
        # behind a hard-coded fractional=True flag; it has been removed.)
        for j, i in np.ndindex(gf.NROWS, gf.NCOLS):
            gpoly = Polygon([
                [LOND[j + 0, i + 0], LATD[j + 0, i + 0]],
                [LOND[j + 0, i + 1], LATD[j + 0, i + 1]],
                [LOND[j + 1, i + 1], LATD[j + 1, i + 1]],
                [LOND[j + 1, i + 0], LATD[j + 1, i + 0]],
                [LOND[j + 0, i + 0], LATD[j + 0, i + 0]],
            ])
            if puberpoly.intersects(gpoly):
                intx = gpoly.intersection(uberpoly)
                evar[0, 0, j, i] = intx.area / gpoly.area

    # Get rid of initial DUMMY variable.
    del outf.variables['DUMMY']
    # Remove VAR-LIST so that it can be inferred.
    delattr(outf, 'VAR-LIST')
    outf.updatemeta()

    outf.variables['TFLAG'][:] = 0
    outf.SDATE = -635
    savedf = outf.save(output_name, verbose=0, complevel=1)
    savedf.close()
531 |
532 |
def main_rename_original_pollutant(tbl_name, input_dir, output_dir):
    """Copy the GeoTIFF inventory files into *output_dir*, renaming pollutants.

    Every '*.tiff' under *input_dir* is copied; when its pollutant name
    appears in ``tbl_name["org"]``, the copy is renamed using the matching
    entry of ``tbl_name["new"]``, otherwise the original base name is kept.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------
    import glob
    import tqdm
    import numpy as np
    import shutil
    # --------------------------------------------------------------------------------------------------------

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    org_names = np.asarray(tbl_name["org"])

    for tiff_path in tqdm.tqdm(glob.glob(f"{input_dir}/*.tiff")):
        basename = os.path.basename(tiff_path)
        parts = encode_title_tiff(basename)

        hits = np.flatnonzero(org_names == parts["pollutant"])
        if hits.size == 0:
            # Pollutant not listed in the rename table: keep the name as-is.
            shutil.copy(tiff_path, f"{output_dir}/{basename}")
            continue

        # First match wins, as with the original np.where()[0][0] lookup.
        new_name = tbl_name["new"][hits[0]]
        new_file_name = f"{parts['label']}_{parts['year']}_{parts['month']}__{parts['sector']}__{new_name}.tiff"
        shutil.copy(tiff_path, f"{output_dir}/{new_file_name}")
578 |
579 |
def create_emission_file(
        emission_date,
        grid_desc,
        grid_name,
        label,
        sector,
        inventory_year,
        inventory_mechanism,
        target_mechanism):
    """Create one day of hourly, speciated CMAQ emissions for one sector.

    Reads the monthly gridded source CSV
    ``output/source/source-{label}-{sector}-{inventory_year}-{mm}.csv``,
    maps its LON/LAT rows onto the model grid, applies chemical speciation
    (``species/...csv``) and weekly/hourly temporal profiles
    (``temporal/weekly.csv`` / ``temporal/hourly.csv``), then saves an IOAPI
    NetCDF file ``output/{target_mechanism}_{sector}_{grid_name}_{yyyymmdd}.nc``.

    :param emission_date: date of the output file (anything pandas can parse).
    :param grid_desc: path of the IOAPI GRIDDESC file.
    :param grid_name: grid name inside the GRIDDESC file.
    :param label: inventory label used in the source file name.
    :param sector: emission sector name (also selects the temporal profiles).
    :param inventory_year: inventory year used in the source file name.
    :param inventory_mechanism: chemical mechanism of the inventory.
    :param target_mechanism: chemical mechanism of the output file.
    """
    # --------------------------------------------------------------------------------------------------------
    # import pandas as pd
    import datetime
    import PseudoNetCDF as pnc
    import numpy as np
    # --------------------------------------------------------------------------------------------------------
    # Convert the emission date to other format.
    datetime_emission = pd.to_datetime(emission_date)
    yyyymmdd = datetime.datetime.strftime(datetime_emission, "%Y%m%d")
    # yyyy = datetime.datetime.strftime(datetime_emission, "%Y")
    mm = datetime.datetime.strftime(datetime_emission, "%m")
    # dd = datetime.datetime.strftime(datetime_emission, "%d")
    yyyyjjj = datetime.datetime.strftime(datetime_emission, "%Y%j")
    # %w: weekday as a decimal string, "0" = Sunday; indexes the weekly profile.
    w = datetime.datetime.strftime(datetime_emission, "%w")

    # Create template file.
    gf = pnc.pncopen(grid_desc, GDNAM=grid_name, format="griddesc", SDATE=int(yyyyjjj), TSTEP=10000, withcf=False)
    gf.updatetflag(overwrite=True)
    # 25 identical time slices: 24 hourly steps plus the closing hour.
    tmpf = gf.sliceDimensions(TSTEP=[0] * 25)
    max_col_index = getattr(tmpf, "NCOLS") - 1
    max_row_index = getattr(tmpf, "NROWS") - 1

    # Create the source file and read it.
    source_file = f"output/source/source-{label}-{sector}-{inventory_year}-{mm}.csv"
    data = pd.read_csv(source_file)

    # Add I and J coordinate and calculate the total emission.
    I, J = gf.ll2ij(data["LON"].values, data["LAT"].values)
    # data["I"], data["J"] = data["rownum"], data["colnum"]
    data["I"], data["J"] = I, J
    # Sum every source row falling into the same grid cell, then keep only
    # cells inside the model domain.
    celltotal = data.groupby(["I", "J"], as_index=False).sum()
    celltotal = celltotal[
        (celltotal["I"] >= 0)
        & (celltotal["J"] >= 0)
        & (celltotal["I"] <= max_col_index)
        & (celltotal["J"] <= max_row_index)
    ]
    # Read species file.
    species_file = (
        f"species/{inventory_mechanism}_{target_mechanism}_speciate_{sector}.csv"
    )
    species_info = pd.read_csv(species_file)
    fname_list = species_info.pollutant.values
    var_list = species_info.emission_species.values
    factor_list = species_info.split_factor.values
    divisor_list = species_info.divisor.values
    origin_units = species_info.inv_unit.values
    target_units = species_info.emi_unit.values

    # Read the temporal file.
    # _monthly_factor = pd.read_csv("temporal/monthly.csv")
    _weekly_factor = pd.read_csv("temporal/weekly.csv")
    _hourly_factor = pd.read_csv("temporal/hourly.csv")
    # monthly_factor = _monthly_factor[sector].values
    weekly_factor = _weekly_factor[sector].values
    hourly_factor = _hourly_factor[sector].values

    # Loop the species and create the variable to IOAPI file.
    items = zip(
        fname_list, var_list, factor_list, divisor_list, origin_units, target_units
    )
    for fname, var, split_factor, divisor, origin_unit, target_unit in items:
        try:
            # Extract the current pollutant.
            df = celltotal[["I", "J", fname]]

            # Convert monthly emission to weekly emission.
            # NOTE(review): 0.25 approximates month -> week assuming four
            # weeks per month — confirm against the temporal profiles.
            weekly_values = df[fname].values * 0.25  # Version 1.0 and Version 1.1
            # weekly_values = converted_values * 0.25

            # Convert weekly emission to daily emission.
            daily_values = weekly_values * weekly_factor[int(w)]

            # Convert daily emission to hourly emission.
            df_list = []
            for hour_i in range(24):
                _df = pd.DataFrame(columns=["J", "I", "hour", "values"])
                _df["J"] = df.J.values
                _df["I"] = df.I.values
                _df["hour"] = np.zeros(df.shape[0]) + hour_i
                _df["values"] = daily_values * hourly_factor[hour_i]
                df_list.append(_df)
            result = pd.concat(df_list)
            # print(result)

            # Convert original units to target units and input the split_factor.
            # 1e6 converts Mmol->mol or Mg->g; /3600 converts per-hour to per-second.
            if origin_unit == "Mmol" and target_unit == "mol/s":
                result["values"] = result["values"] * 1000000.0 / 3600.0 * split_factor
            elif origin_unit == "Mg" and target_unit == "g/s":
                result["values"] = result["values"] * 1000000.0 / 3600.0 * split_factor
            elif origin_unit == "Mg" and target_unit == "mol/s":
                # Mass-to-mole conversion uses the per-species divisor
                # (presumably a molar mass — confirm against the species CSV).
                result["values"] = (
                    result["values"] * 1000000.0 / 3600.0 / divisor * split_factor
                )

            # Convert the I, J, hour to int.
            result[["hour", "J", "I"]] = result[["hour", "J", "I"]].astype("int")
            h = result.hour
            i = result.I
            j = result.J

            # Create the variable of emission.
            evar = tmpf.createVariable(var, "f", ("TSTEP", "LAY", "ROW", "COL"))
            if target_unit == "mol/s":
                evar.setncatts(dict(units="moles/s", long_name=var, var_desc=var))
            elif target_unit == "g/s":
                evar.setncatts(dict(units="g/s", long_name=var, var_desc=var))

            # Fancy-indexed write; h * 0 supplies the all-zero LAY index.
            evar[h, h * 0, j, i] = result["values"].values

        except KeyError:
            # Pollutant column missing from the source table: skip it.
            print(f"Warning: Do not have the pollutant named {fname}.")
            continue
    # Get rid of initial DUMMY variable
    del tmpf.variables["DUMMY"]

    # Update TFLAG to be consistent with variables
    tmpf.updatetflag(tstep=10000, overwrite=True)

    # Remove VAR-LIST so that it can be inferred
    delattr(tmpf, "VAR-LIST")
    tmpf.updatemeta()

    # Save out.
    output_name = f"output/{target_mechanism}_{sector}_{grid_name}_{yyyymmdd}.nc"
    tmpf.save(output_name, format="NETCDF3_CLASSIC")
716 |
717 |
def create_gridinfo(small_grid, big_grid, out_path, max_id):
    """Intersect the fine grid with the coarse grid and write grid_info.csv.

    Every fine-grid cell is assigned to the coarse-grid cell ('NAME') with
    which it shares the largest overlap area; cells with no overlap at all
    are recorded with NAME 'unknown'.

    :param small_grid: path of the fine-grid shapefile.
    :param big_grid: path of the coarse-grid shapefile.
    :param out_path: output directory ('grid_info.csv' is written there).
    :param max_id: largest fine-grid cell ID to account for.
    """
    # --------------------------------------------------------------------------------------------------------
    import geopandas as gpd
    import numpy as np
    import glob
    # --------------------------------------------------------------------------------------------------------
    # Remove any output left over from a previous run; missing files are fine.
    for leftover in (f'{out_path}/cmaq_grid.csv', f'{out_path}/cmaq_intersect.csv'):
        try:
            os.remove(leftover)
        except OSError:
            pass

    # BUGFIX: this path used a backslash separator, which is a literal
    # character (not a separator) on POSIX, so the trailing glob cleanup
    # never matched the temporary shapefile there.
    out_feature_class = f'{out_path}/cmaq_intersect.shp'
    gdf_smallgrid = gpd.read_file(small_grid).to_crs(epsg=4326)
    gdf_biggrid = gpd.read_file(big_grid).to_crs(gdf_smallgrid.crs)
    intersect_out = gpd.overlay(gdf_smallgrid, gdf_biggrid, how='intersection')
    intersect_out.to_file(out_feature_class)

    # Dump the fine-grid attributes to CSV.
    out_name = 'cmaq_grid.csv'
    gpd.read_file(small_grid).to_csv(f'{out_path}/{out_name}', index=False)

    # Cell-centre coordinates, indexed by position.
    df = pd.read_csv(f'{out_path}/{out_name}')
    df_lon = df['LON'].values
    df_lat = df['LAT'].values

    # Area (km^2) of every intersection piece, measured in a projected CRS.
    gdf = gpd.read_file(out_feature_class)
    gdf = gdf.to_crs(epsg=4547)
    gdf['square'] = gdf.geometry.area / 1e6
    gdf.to_csv(f'{out_path}/cmaq_intersect.csv', index=False)

    # Keep only the largest-overlap row per fine-grid cell; collect cells
    # without any overlap as 'unknown'.
    df = pd.read_csv(f'{out_path}/cmaq_intersect.csv')
    df_ids = df['ID'].values
    df_area = df['square'].values

    del_list = []
    unknown_rows = []
    for temp_id in range(int(max_id) + 1):
        if temp_id in df_ids:
            pos_in_df = np.where(df_ids == temp_id)[0]
            temp_area = df_area[pos_in_df]
            # Mark every piece except the biggest one for deletion.
            pos_in_temp = np.where(temp_area != temp_area.max())[0]
            if len(pos_in_temp) != 0:
                del_list.extend(pos_in_df[pos_in_temp])
        else:
            # NOTE(review): for temp_id == 0 this indexes df_lon[-1];
            # IDs appear to be 1-based here — confirm against the grid CSV.
            unknown_rows.append({'ID': temp_id,
                                 'NAME': 'unknown',
                                 'LON': df_lon[temp_id - 1],
                                 'LAT': df_lat[temp_id - 1]})

    df = df.drop(del_list)
    # FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
    if unknown_rows:
        df = pd.concat([df, pd.DataFrame(unknown_rows)], ignore_index=True)
    df = df.drop_duplicates(['ID'])
    df.sort_values("ID", inplace=True)
    df.to_csv(f'{out_path}/grid_info.csv', index=False)

    # Remove the temporary files.
    for pattern in (f'{out_path}/cmaq_intersect.*', f'{out_path}/cmaq_grid.*'):
        for file in glob.glob(pattern):
            os.remove(file)
792 |
793 |
# -------------------------------------------------------------------
# Spatial allocation factor calculation - road-based allocation
# -------------------------------------------------------------------
def road_allocation(road_class_list, road_dir, small_grid, big_grid, grid_info, out_path):
    """Compute road-length-based spatial allocation factors per fine cell.

    For every road class, the road lines are intersected with the coarse and
    fine grids; a fine cell's factor is the road length inside the cell
    divided by the road length inside its parent coarse cell ('NAME').
    One 'EF_{road_class}.csv' is written per class.

    :param road_class_list: iterable of road shapefile base names.
    :param road_dir: directory holding the road shapefiles.
    :param small_grid: path of the fine-grid shapefile.
    :param big_grid: path of the coarse-grid shapefile.
    :param grid_info: path of grid_info.csv built by create_gridinfo().
    :param out_path: output directory for the EF CSVs and temporary files.
    """
    # -------------------------------------------------------------------
    import numpy as np
    import geopandas as gpd
    from shapely.geometry import box
    import glob
    # -------------------------------------------------------------------

    for road_class in road_class_list:
        # BUGFIX: the road path used a backslash separator, which breaks on
        # POSIX systems; '/' works on both Windows and POSIX.
        road_file = f'{road_dir}/{road_class}'
        out_csv_shp_name = f'EF_{road_class}'  # base name of the output CSV

        # Remove leftovers of a previous run; missing files are fine.
        for leftover in (f'{out_path}/Big_grid_road.csv', f'{out_path}/Small_grid_road.csv'):
            try:
                os.remove(leftover)
            except OSError:
                pass

        gdf_road = gpd.read_file(road_file)

        # Bounding box of the fine grid, used to pre-clip the road layer.
        gdf_clip_grid = gpd.read_file(small_grid).to_crs(gdf_road.crs)
        clip_bounds = gdf_clip_grid.total_bounds
        clip_box = box(clip_bounds[0], clip_bounds[1], clip_bounds[2], clip_bounds[3])
        box_gdf = gpd.GeoDataFrame(geometry=[clip_box], crs=gdf_clip_grid.crs)

        # --- Total road length per coarse-grid cell ------------------------
        # Clip the coarse grid to the fine-grid box first; the fine domain
        # is usually much smaller, which keeps the overlay fast.
        clipped_big_grid = f'{out_path}/Big_grid_cliped.shp'
        clip_out = gpd.overlay(box_gdf, gpd.read_file(big_grid).to_crs(gdf_road.crs), how='intersection')
        clip_out.to_file(clipped_big_grid)

        out_feature_class = f'{out_path}/Big_grid_road.shp'
        clip_big_grid = gpd.read_file(clipped_big_grid).to_crs(gdf_road.crs)
        gdf_road_clip = gpd.overlay(gdf_road, box_gdf, how='intersection')
        intersect_out = gpd.overlay(gdf_road_clip, clip_big_grid, how='intersection')
        intersect_out.to_file(out_feature_class)

        # Lengths are measured in a projected CRS (EPSG:4547, metres).
        gdf = gpd.read_file(out_feature_class)
        gdf = gdf.to_crs(epsg=4547)
        gdf = gdf.explode()
        gdf['Length_m'] = gdf.geometry.length
        result = gdf.groupby('NAME', as_index=False)['Length_m'].sum()
        MULTILINESTRING_combine = pd.merge(result, gdf[['NAME']].drop_duplicates(), on='NAME')
        MULTILINESTRING_combine.to_csv(f'{out_path}/Big_grid_road.csv', index=False)

        temp_big_grid_sst = pd.read_csv(f'{out_path}/Big_grid_road.csv')
        name_list = np.unique(temp_big_grid_sst['NAME'])
        length_list = []
        for temp_area in name_list:
            temp_length = temp_big_grid_sst[temp_big_grid_sst['NAME'].isin([temp_area])]['Length_m'].values.sum()
            length_list.append(temp_length)
        big_grid_sst = pd.DataFrame(columns=['NAME', 'LENGTH'])
        big_grid_sst['NAME'] = name_list
        big_grid_sst['LENGTH'] = length_list

        # --- Total road length per fine-grid cell --------------------------
        out_feature_class = f'{out_path}/Small_grid_road.shp'
        clip_small_out = gpd.overlay(gdf_road_clip, gpd.read_file(small_grid).to_crs(gdf_road.crs),
                                     how='intersection')
        clip_small_out.to_file(out_feature_class)

        gdf = gpd.read_file(out_feature_class)
        gdf = gdf.to_crs(epsg=4547)
        gdf['Length_m'] = gdf.geometry.length
        gdf.to_csv(f'{out_path}/Small_grid_road.csv', index=False)

        temp_small_grid_sst = pd.read_csv(f'{out_path}/Small_grid_road.csv')
        id_list = np.unique(temp_small_grid_sst['ID'])
        length_list = []
        for temp_id in id_list:
            temp_length = temp_small_grid_sst[temp_small_grid_sst['ID'].isin([temp_id])]['Length_m'].values.sum()
            length_list.append(temp_length)
        small_grid_sst = pd.DataFrame(columns=['ID', 'LENGTH'])
        small_grid_sst['ID'] = id_list
        small_grid_sst['LENGTH'] = length_list

        # --- Allocation factor per fine cell -------------------------------
        df = pd.read_csv(grid_info)
        result = pd.DataFrame(columns=['ID', 'LON', 'LAT', 'AREA', 'EF'])
        ef_list = []
        id_list = []
        area_list = []
        lon_list = []
        lat_list = []
        for temp_id, temp_area, temp_lon, temp_lat in zip(df['ID'].values, df['NAME'].values, df['LON'].values,
                                                          df['LAT'].values):
            if temp_area == 'unknown':
                # Cells outside every coarse cell get a zero factor.
                temp_ef = 0.0
            elif temp_id in small_grid_sst['ID'].values:
                temp_length = small_grid_sst[small_grid_sst['ID'].isin([temp_id])]['LENGTH'].values[0]
                try:
                    temp_total_length = big_grid_sst[big_grid_sst['NAME'].isin([temp_area])]['LENGTH'].values[0]
                    temp_ef = temp_length / temp_total_length
                except (IndexError, ZeroDivisionError):
                    # No roads recorded for the parent coarse cell.
                    temp_ef = 0.0
            else:
                # No roads in this fine cell.
                temp_ef = 0.0

            ef_list.append(temp_ef)
            id_list.append(temp_id)
            area_list.append(temp_area)
            lon_list.append(temp_lon)
            lat_list.append(temp_lat)

        result['ID'] = id_list
        result['LON'] = lon_list
        result['LAT'] = lat_list
        result['AREA'] = area_list
        result['EF'] = ef_list

        result.to_csv(f'{out_path}/{out_csv_shp_name}.csv', index=False)

        # Remove the temporary files.
        for pattern in (f'{out_path}/Big_grid_cliped.*',
                        f'{out_path}/Big_grid_road.*',
                        f'{out_path}/Small_grid_road.*'):
            for file in glob.glob(pattern):
                os.remove(file)
939 |
940 |
def road_deal(ef_files, ef_factors, output_name):
    """Combine several road-allocation EF tables into one weighted table.

    The output keeps every column of the first file, with its 'EF' column
    replaced by the factor-weighted sum of the 'EF' columns of all files.

    :param ef_files: list of CSV paths, each holding an 'EF' column.
    :param ef_factors: weights, aligned with *ef_files*.
    :param output_name: path of the combined output CSV.
    """
    # PERF: read the first file once; the original re-read it three times.
    df = pd.read_csv(ef_files[0])
    result_ef = df['EF'] * 0
    for ef_file, ef_factor in zip(ef_files, ef_factors):
        result_ef += pd.read_csv(ef_file)['EF'] * ef_factor
    df['EF'] = result_ef
    df.to_csv(output_name, index=False)
952 |
def raster_allocation(raster_file, small_grid, big_grid, grid_info, out_path, result_csv_path_name):
    """Compute raster-weighted spatial allocation factors per fine cell.

    The raster is zonally summed over the coarse grid (clipped to the fine
    grid's bounding box) and over the fine grid; a fine cell's factor is its
    zonal sum divided by the zonal sum of its parent coarse cell ('NAME').

    :param raster_file: path of the weighting raster (GeoTIFF).
    :param small_grid: path of the fine-grid shapefile.
    :param big_grid: path of the coarse-grid shapefile.
    :param grid_info: path of grid_info.csv built by create_gridinfo().
    :param out_path: output directory.
    :param result_csv_path_name: base name (no extension) of the output CSV.
    """
    # -------------------------------------------------------------------
    import glob
    import geopandas as gpd
    import rasterstats
    from shapely.geometry import box
    # -------------------------------------------------------------------
    # Remove leftovers of a previous run; missing files are fine.
    for leftover in (f'{out_path}/Big_grid_zonalstattblout.csv',
                     f'{out_path}/Small_grid_zonalstattblout.csv'):
        try:
            os.remove(leftover)
        except OSError:
            pass

    # Bounding box of the fine grid, used to clip the coarse grid.
    gdf_clip_grid = gpd.read_file(small_grid).to_crs(epsg=4326)
    clip_bounds = gdf_clip_grid.total_bounds
    clip_box = box(clip_bounds[0], clip_bounds[1], clip_bounds[2], clip_bounds[3])
    box_gdf = gpd.GeoDataFrame(geometry=[clip_box], crs=gdf_clip_grid.crs)

    clipped_big_grid = f'{out_path}/Big_grid_cliped.shp'
    clip_out = gpd.overlay(box_gdf, gpd.read_file(big_grid).to_crs(epsg=4326), how='intersection')
    clip_out.to_file(clipped_big_grid)

    # Zonal sum of the raster over every coarse-grid cell.
    outZSaT = rasterstats.zonal_stats(clipped_big_grid, raster_file, stats="sum", geojson_out=True, nodata=-999)
    result = gpd.GeoDataFrame.from_features(outZSaT)
    result = result.rename(columns={'sum': 'SUM'})
    result.to_csv(f'{out_path}/Big_grid_zonalstattblout.csv', index=False)

    # Zonal sum of the raster over every fine-grid cell.
    gdf_small_grid = gpd.read_file(small_grid).to_crs(epsg=4326)
    outZSaT = rasterstats.zonal_stats(gdf_small_grid, raster_file, stats="sum", geojson_out=True, nodata=-999)
    result = gpd.GeoDataFrame.from_features(outZSaT)
    result = result.rename(columns={'sum': 'SUM'})
    result.to_csv(f'{out_path}/Small_grid_zonalstattblout.csv', index=False)

    df = pd.read_csv(grid_info)
    # BUGFIX: these two files are written above with '/' but were re-read
    # with backslash separators, which are literal filename characters on
    # POSIX and made the reads fail there.
    big_grid_zstt = pd.read_csv(f'{out_path}/Big_grid_zonalstattblout.csv')
    small_grid_zstt = pd.read_csv(f'{out_path}/Small_grid_zonalstattblout.csv')

    ef_list = []
    id_list = []
    area_list = []
    lon_list = []
    lat_list = []
    for temp_id, temp_area, temp_lon, temp_lat in zip(df['ID'].values, df['NAME'].values, df['LON'].values,
                                                      df['LAT'].values):
        if temp_area == 'unknown':
            # Cells outside every coarse cell get a zero factor.
            temp_ef = 0.0
        else:
            try:
                temp_big_grid_length = big_grid_zstt[big_grid_zstt['NAME'].isin([temp_area])]['SUM'].values[0]
            except IndexError:
                # Parent coarse cell not present in the zonal statistics.
                temp_big_grid_length = 0.0

            if temp_big_grid_length == 0.0:
                temp_ef = 0.0
            else:
                try:
                    temp_small_grid_length = small_grid_zstt[small_grid_zstt['ID'].isin([temp_id])]['SUM'].values[0]
                    temp_ef = temp_small_grid_length / temp_big_grid_length
                except IndexError:
                    temp_ef = 0.0

        ef_list.append(temp_ef)
        id_list.append(temp_id)
        area_list.append(temp_area)
        lon_list.append(temp_lon)
        lat_list.append(temp_lat)

    result = pd.DataFrame(columns=['ID', 'LON', 'LAT', 'AREA', 'EF'])
    result['ID'] = id_list
    result['LON'] = lon_list
    result['LAT'] = lat_list
    result['AREA'] = area_list
    result['EF'] = ef_list

    result.to_csv(f'{out_path}/{result_csv_path_name}.csv', index=False)

    # Remove the temporary files.
    for pattern in (f'{out_path}/Big_grid_cliped.*',
                    f'{out_path}/Big_grid_zonalstattblout.*',
                    f'{out_path}/Small_grid_zonalstattblout.*'):
        for file in glob.glob(pattern):
            os.remove(file)
1058 |
1059 |
def split_file_extension(filebasename):
    """Split a file basename into (stem, extension); the extension keeps its dot."""
    return os.path.splitext(filebasename)
1063 |
1064 |
def zoning_statistics(small_grid, big_grid, raster_dir, out_path, mm):
    """Zonal-sum every monthly emission GeoTIFF over the coarse grid.

    For month *mm*, every '{label}_{year}_{mm}__{sector}__{pollutant}.tiff'
    in *raster_dir* is summed per coarse-grid cell (the coarse grid is first
    clipped to the fine grid's bounding box) and written as
    '{out_path}/{tiff stem}.csv'.

    :param small_grid: path of the fine-grid shapefile.
    :param big_grid: path of the coarse-grid shapefile.
    :param raster_dir: directory holding the emission GeoTIFF files.
    :param out_path: output directory for the per-file CSVs.
    :param mm: two-digit month string.
    """
    import glob
    import geopandas as gpd
    import rasterstats
    from shapely.geometry import box
    import tqdm

    if not os.path.exists(out_path):
        os.mkdir(out_path)

    # Bounding box of the fine grid, used to clip the coarse grid.
    gdf_clip_grid = gpd.read_file(small_grid).to_crs(epsg=4326)
    clip_bounds = gdf_clip_grid.total_bounds
    clip_box = box(clip_bounds[0], clip_bounds[1], clip_bounds[2], clip_bounds[3])
    box_gdf = gpd.GeoDataFrame(geometry=[clip_box], crs=gdf_clip_grid.crs)

    out_feature_class = f'{out_path}/Clipped.shp'
    clip_out = gpd.overlay(box_gdf, gpd.read_file(big_grid).to_crs(epsg=4326), how='intersection')
    clip_out.to_file(out_feature_class)

    file_list = glob.glob(f'{raster_dir}/*_*_{mm}__*__*.tiff')
    if len(file_list) == 0:
        exit(f"ERROR: There is no file in the directory: {raster_dir}")

    for file in tqdm.tqdm(file_list, desc=f"{time_format()}Processing for month {mm}"):
        out_name, file_extension = split_file_extension(os.path.basename(file))

        # Skip files whose statistics already exist from a previous run.
        if os.path.exists(f'{out_path}/{out_name}.csv'):
            continue
        try:
            outZSaT = rasterstats.zonal_stats(out_feature_class, file, stats="sum", geojson_out=True, nodata=-999)
            result = gpd.GeoDataFrame.from_features(outZSaT)
            result = result[result['sum'].notnull()]  # drop cells without a value
            result = result.rename(columns={'sum': 'SUM'})
            result.to_csv(f'{out_path}/{out_name}.csv', index=False)
        except Exception:
            # Narrowed from a bare 'except:'; an unreadable raster is fatal.
            exit(f"ERROR: Please double check the file : {file}")

    # Remove auxiliary files written alongside the shapefile and CSVs.
    for pattern in (f'{out_path}/*.cpg', f'{out_path}/*.xml',
                    f'{out_path}/*.dbf', f'{out_path}/Clipped.*'):
        for file in glob.glob(pattern):
            os.remove(file)
1126 |
1127 |
def draw_modle_grid(griddesc_file):
    """Write the model grid described by a GRIDDESC file as a polygon shapefile.

    The output 'output/shapefile-grid.shp' holds one rectangle per grid
    cell, attributed with a 0-based cell 'ID' (string), 'rownum' and
    'colnum', in the grid's Lambert conformal conic projection.

    :param griddesc_file: path of the IOAPI GRIDDESC file.
    """
    import pyioapi
    import geopandas as gpd
    from shapely.geometry import Polygon
    import pyproj

    # Read the first grid and coordinate definition from the GRIDDESC file.
    griddesc = pyioapi.GRIDDESC(griddesc_file)
    grid = griddesc.get_grid(list(griddesc.grids)[0])
    coord = griddesc.get_coord(list(griddesc.coords)[0])

    # Build one rectangle per cell, column by column, cell 0 at lower left.
    ids, rownums, colnums, rings = [], [], [], []
    idd_num = 1
    x_left = grid.XORIG
    x_right = x_left + grid.XCELL
    for col in range(1, grid.NCOLS + 1):
        # Reset the vertical extent at the start of every column.
        y_bottom = grid.YORIG
        y_top = y_bottom + grid.YCELL
        for row in range(1, grid.NROWS + 1):
            rings.append(Polygon([(x_left, y_bottom),
                                  (x_left, y_top),
                                  (x_right, y_top),
                                  (x_right, y_bottom)]))
            ids.append(str(idd_num - 1))
            rownums.append(row - 1)
            colnums.append(col - 1)
            idd_num += 1
            y_top += grid.YCELL
            y_bottom += grid.YCELL
        x_left += grid.XCELL
        x_right += grid.XCELL

    # PERF: build ONE GeoDataFrame instead of concatenating one per cell.
    gdf_fishgrid = gpd.GeoDataFrame({'ID': ids, 'rownum': rownums, 'colnum': colnums},
                                    geometry=rings)

    # Lambert conformal conic projection from the GRIDDESC parameters.
    # FIX: the original string omitted the '+' before lat_0 while using it
    # for every other parameter; all keys now carry the '+' prefix.
    wrf_proj = pyproj.Proj('+proj=lcc ' + '+lon_0=' + str(coord.XCENT) + ' +lat_0=' + str(coord.YCENT)
                           + ' +lat_1=' + str(coord.P_ALP) + ' +lat_2=' + str(coord.P_BET))
    gdf_fishgrid.crs = wrf_proj.crs

    # Write the model-grid shapefile.
    gdf_fishgrid.to_file('output/shapefile-grid.shp',
                         driver='ESRI Shapefile',
                         encoding='utf-8')
1193 |
1194 |
# Build the source files based on the MEIC inventory
def create_source(label, year, mm, development_list, emission_factor_dir, emission_data_dir, out_path):
    """Downscale coarse per-region emission sums to fine-grid source CSVs.

    For every sector in *development_list*, each fine-grid cell's emission is
    the zonal sum of its parent coarse region ('AREA') multiplied by the
    cell's allocation factor ('EF').  One
    'source-{label}-{sector}-{year}-{mm}.csv' is written per sector.

    :param label: inventory label used in the file names.
    :param year: inventory year.
    :param mm: two-digit month string.
    :param development_list: iterable of sector names.
    :param emission_factor_dir: directory holding the 'EF_{sector}.csv' tables.
    :param emission_data_dir: directory holding the zonal-statistics CSVs.
    :param out_path: output directory for the source CSVs.
    """
    import glob
    import tqdm

    for development in development_list:
        out_file = f"{out_path}/source-{label}-{development}-{year}-{mm}.csv"
        # Skip sectors already processed by a previous run.
        if os.path.exists(out_file):
            continue

        # Read the emission-factor (spatial allocation) table.
        ef_data = pd.read_csv(f'{emission_factor_dir}/EF_{development}.csv')

        result = ef_data[['LON', 'LAT']].copy()
        ef_areas = ef_data['AREA'].values
        ef_values = ef_data['EF'].values

        files = glob.glob(f'{emission_data_dir}/{label}_{year}_{mm}__{development}__*.csv')

        for file in tqdm.tqdm(files, desc=f'Create source file of {development}'):
            specie = encode_title(os.path.basename(file))["pollutant"]
            data = pd.read_csv(file)
            # PERF: one dict lookup per row instead of an O(n) isin() scan
            # per fine-grid cell; drop_duplicates keeps the FIRST occurrence,
            # matching the original '.values[0]' behaviour.
            sums = data.drop_duplicates('NAME').set_index('NAME')['SUM'].to_dict()
            result[specie] = [temp_ef * sums[temp_area] if temp_area in sums else 0.0
                              for temp_area, temp_ef in zip(ef_areas, ef_values)]

        result.to_csv(out_file, index=False)
1231 |
1232 |
def encode_title(file_name):
    """Parse an inventory CSV file name into its components.

    Expected pattern: '{label}_{year}_{month}__{sector}__{pollutant}.csv'.

    :param file_name: base name of the file (no directory part).
    :return: dict with keys 'label', 'year', 'month', 'sector', 'pollutant'.
    :raises IndexError: if *file_name* does not match the expected pattern.
    """
    import re
    # FIX: escape the dot so '.csv' matches a literal extension (the old
    # pattern let '.' match any character) and drop the needless f-prefix.
    condition = r"(.*?)_(.*?)_(.*?)__(.*?)__(.*?)\.csv"
    label, year, month, sector, pollutant = re.findall(condition, file_name)[0]
    # Return a plain literal instead of shadowing the builtin name 'dict'.
    return {"label": label,
            "year": year,
            "month": month,
            "sector": sector,
            "pollutant": pollutant}
1249 |
1250 |
def encode_title_tiff(file_name):
    """Parse an inventory GeoTIFF file name into its components.

    Expected pattern: '{label}_{year}_{month}__{sector}__{pollutant}.tiff'.

    :param file_name: base name of the file (no directory part).
    :return: dict with keys 'label', 'year', 'month', 'sector', 'pollutant'.
    :raises IndexError: if *file_name* does not match the expected pattern.
    """
    import re
    # FIX: escape the dot so '.tiff' matches a literal extension (the old
    # pattern let '.' match any character) and drop the needless f-prefix.
    condition = r"(.*?)_(.*?)_(.*?)__(.*?)__(.*?)\.tiff"
    label, year, month, sector, pollutant = re.findall(condition, file_name)[0]
    # Return a plain literal instead of shadowing the builtin name 'dict'.
    return {"label": label,
            "year": year,
            "month": month,
            "sector": sector,
            "pollutant": pollutant}
1267 |
1268 |
def read_tiff(file):
    """Read a single-band GeoTIFF and return its values with coordinates.

    Note: in the original code this docstring sat after the imports, making
    it a dead string expression rather than a real docstring; it is now the
    first statement.

    :param file: The file path of GeoTIFF.
    :return: tuple (value, lons, lats) — 2-D value array of the first band
        and 2-D longitude/latitude arrays from meshgrid.
    """
    import rioxarray as rxr
    import numpy as np
    dataset = rxr.open_rasterio(file)
    longitude = dataset.coords["x"].values
    latitude = dataset.coords["y"].values
    lons, lats = np.meshgrid(longitude, latitude)
    # Take the first band only; inventory rasters are single-band.
    value = dataset[0, ...].values
    return value, lons, lats
1283 |
1284 |
def create_source_table(input_dir, mm, sector):
    """Build a table of species values with coordinates from sector GeoTIFFs.

    Note: in the original code this docstring sat after the imports, making
    it a dead string expression rather than a real docstring.

    :param input_dir: the path of input directory which stores the GeoTIFF files.
    :param mm: the month of emission file date.
    :param sector: the sector of emission file.
    :return: DataFrame, a table including latitude, longitude and species values.
    """
    import glob
    import re

    # Search the files of this month/sector in the input directory.
    files = glob.glob(f"{input_dir}/*_{mm}__{sector}__*.tiff")

    # Build a DataFrame to store the inventory information.
    df = pd.DataFrame(columns=["lat", "lon"])

    # Hoist the pattern out of the loop; escape the extension dot so a
    # literal ".tiff" suffix is required.
    condition = re.compile(rf"(.*?)_(.*?)_{mm}__{sector}__(.*?)\.tiff")

    for file in files:
        # Read the GeoTIFF and return the value and geographic information.
        value, lons, lats = read_tiff(file)

        # Get the species name from the file name.
        basename = os.path.basename(file)
        pollutant = condition.findall(basename)[0][2]

        # Put the pollutant into the DataFrame.
        df[pollutant] = value.flatten()
        # All files of one sector share the same grid, so the coordinates
        # written here are identical for every file — TODO confirm.
        df["lat"] = lats.flatten()
        df["lon"] = lons.flatten()
    return df
1318 |
1319 |
def source2cmaq(
    emission_date,
    grid_desc,
    grid_name,
    sector,
    input_dir,
    inventory_mechanism,
    target_mechanism,
    output_dir,
):
    """
    Build a one-day hourly CMAQ (IOAPI/NetCDF) emission file for one sector
    from monthly GeoTIFF inventories: grid the point table onto the model
    grid, apply weekly/hourly temporal profiles and the species map, then
    write a 25-step NetCDF file.

    :param emission_date: set the date for CMAQ emission file.
    :param grid_desc: the path of GRIDDESC file.
    :param grid_name: set the grid name.
    :param sector: the sector of original emission inventory.
    :param input_dir: the path of input directory which store the GeoTIFF files.
    :param inventory_mechanism: set chemical mechanism of inventory.
    :param target_mechanism: set chemical mechanism of CMAQ emission file.
    :param output_dir: set the output directory.
    :return: None; writes {target_mechanism}_{sector}_{grid_name}_{yyyymmdd}.nc
        into output_dir.
    """
    # import pandas as pd
    import datetime
    import PseudoNetCDF as pnc
    import numpy as np

    # Convert the emission date to the formats needed below.
    datetime_emission = pd.to_datetime(emission_date)
    yyyymmdd = datetime.datetime.strftime(datetime_emission, "%Y%m%d")
    # yyyy = datetime.datetime.strftime(datetime_emission, "%Y")
    mm = datetime.datetime.strftime(datetime_emission, "%m")
    # dd = datetime.datetime.strftime(datetime_emission, "%d")
    # %j: day of year; IOAPI SDATE is YYYYDDD.
    yyyyjjj = datetime.datetime.strftime(datetime_emission, "%Y%j")
    # %w: weekday as a decimal string, "0" = Sunday.
    w = datetime.datetime.strftime(datetime_emission, "%w")

    # Create the IOAPI template file from GRIDDESC (hourly TSTEP = 10000).
    gf = pnc.pncopen(
        grid_desc,
        GDNAM=grid_name,
        format="griddesc",
        SDATE=int(yyyyjjj),
        TSTEP=10000,
        withcf=False,
    )
    gf.updatetflag(overwrite=True)
    # 25 time steps: hours 0..24 of the emission day.
    tmpf = gf.sliceDimensions(TSTEP=[0] * 25)
    max_col_index = getattr(tmpf, "NCOLS") - 1
    max_row_index = getattr(tmpf, "NROWS") - 1

    # Create the source file and read it.
    data = create_source_table(input_dir, mm, sector)

    # Add I and J coordinates and sum all points falling into the same cell.
    data["I"], data["J"] = tmpf.ll2ij(data.lon.values, data.lat.values)
    celltotal = data.groupby(["I", "J"], as_index=False).sum()
    # Drop cells that fall outside the model grid.
    celltotal = celltotal[
        (celltotal["I"] >= 0)
        & (celltotal["J"] >= 0)
        & (celltotal["I"] <= max_col_index)
        & (celltotal["J"] <= max_row_index)
    ]
    celltotal = celltotal.drop(columns=["lon", "lat"], axis=1)

    # Read species file mapping inventory pollutants to mechanism species.
    species_file = (
        f"species/{inventory_mechanism}_{target_mechanism}_speciate_{sector}.csv"
    )
    species_info = pd.read_csv(species_file)
    fname_list = species_info.pollutant.values
    var_list = species_info.emission_species.values
    factor_list = species_info.split_factor.values
    divisor_list = species_info.divisor.values
    origin_units = species_info.inv_unit.values
    target_units = species_info.emi_unit.values

    # Read the temporal profile files (per-sector columns).
    # _monthly_factor = pd.read_csv("temporal/monthly.csv")
    _weekly_factor = pd.read_csv("temporal/weekly.csv")
    _hourly_factor = pd.read_csv("temporal/hourly.csv")
    # monthly_factor = _monthly_factor[sector].values
    weekly_factor = _weekly_factor[sector].values
    hourly_factor = _hourly_factor[sector].values

    # Loop the species and create the variable in the IOAPI file.
    items = zip(
        fname_list, var_list, factor_list, divisor_list, origin_units, target_units
    )
    for fname, var, split_factor, divisor, origin_unit, target_unit in items:
        try:
            # Extract the current pollutant.
            df = celltotal[["I", "J", fname]]

            # Convert monthly emission to weekly emission.
            # NOTE(review): 0.25 assumes ~4 weeks per month — approximation.
            weekly_values = df[fname].values * 0.25

            # Convert weekly emission to daily emission using the weekday
            # profile (index 0 = Sunday, per %w above).
            daily_values = weekly_values * weekly_factor[int(w)]

            # Convert daily emission to hourly emission: one sub-frame per
            # hour, then stack them.
            df_list = []
            for hour_i in range(24):
                _df = pd.DataFrame(columns=["J", "I", "hour", "values"])
                _df["J"] = df.J.values
                _df["I"] = df.I.values
                _df["hour"] = np.zeros(df.shape[0]) + hour_i
                _df["values"] = daily_values * hourly_factor[hour_i]
                df_list.append(_df)
            result = pd.concat(df_list)

            # Convert original units to target units and apply split_factor.
            # Mmol -> mol and Mg -> g are both 1e6; /3600 converts per-hour
            # totals to per-second rates.
            if origin_unit == "Mmol" and target_unit == "mol/s":
                result["values"] = result["values"] * 1000000.0 / 3600.0 * split_factor
            elif origin_unit == "Mg" and target_unit == "g/s":
                result["values"] = result["values"] * 1000000.0 / 3600.0 * split_factor
            elif origin_unit == "Mg" and target_unit == "mol/s":
                # divisor is the molar mass used for mass -> moles.
                result["values"] = (
                    result["values"] * 1000000.0 / 3600.0 / divisor * split_factor
                )

            # Convert the I, J, hour columns to int for fancy indexing.
            result[["hour", "J", "I"]] = result[["hour", "J", "I"]].astype("int")
            h = result.hour
            i = result.I
            j = result.J

            # Create the emission variable (TSTEP, LAY, ROW, COL).
            evar = tmpf.createVariable(var, "f", ("TSTEP", "LAY", "ROW", "COL"))
            if target_unit == "mol/s":
                evar.setncatts(dict(units="moles/s", long_name=var, var_desc=var))
            elif target_unit == "g/s":
                evar.setncatts(dict(units="g/s", long_name=var, var_desc=var))
            # h * 0 selects layer 0 (surface) for every record.
            evar[h, h * 0, j, i] = result["values"].values

        except KeyError:
            # If this pollutant is absent from the GeoTIFF table, skip it.
            print(f"Warning: Do not have the pollutant named {fname}.")
            continue
    # Get rid of initial DUMMY variable
    del tmpf.variables["DUMMY"]

    # Update TFLAG to be consistent with variables
    tmpf.updatetflag(tstep=10000, overwrite=True)

    # Remove VAR-LIST so that it can be inferred
    delattr(tmpf, "VAR-LIST")
    tmpf.updatemeta()

    # Save out.
    # output_name = f"{output_dir}/{grid_name}_{yyyymmdd}_{target_mechanism}_{sector}.nc" # 1.4
    output_name = f"{output_dir}/{target_mechanism}_{sector}_{grid_name}_{yyyymmdd}.nc" # 1.5
    tmpf.save(output_name, format="NETCDF3_CLASSIC")
    tmpf.close()
    # print(f"Finish: {output_name}")
1474 |
1475 |
def read_nml(nml_path):
    """Read the &global section of a Fortran namelist and return its settings.

    :param nml_path: path of the namelist file (e.g. "namelist.input").
    :return: tuple of (griddesc_file, griddesc_name, big_grid_file,
        inventory_year, sectors, geotiff_dir, allocation_raster,
        inventory_label, inventory_mechanism, target_mechanism,
        start_date, end_date, cores).
    """
    import f90nml
    global_section = f90nml.read(nml_path)["global"]
    # Order matters: callers unpack positionally. Note the "allocator" key
    # maps to the allocation_raster slot.
    keys = (
        "griddesc_file", "griddesc_name", "big_grid_file", "inventory_year",
        "sectors", "geotiff_dir", "allocator", "inventory_label",
        "inventory_mechanism", "target_mechanism", "start_date", "end_date",
        "cores",
    )
    return tuple(global_section[key] for key in keys)
1496 |
1497 |
def main_f2c():
    """Entry point: convert fine-resolution GeoTIFF emissions into CMAQ
    emission files for every (date, sector) pair configured in
    ``namelist.input``, using a multiprocessing pool.

    :return: None.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------
    import f90nml
    import multiprocessing
    # --------------------------------------------------------------------------------------------------------
    example = f90nml.read("namelist.input")
    gridFile = example["global"]["griddesc_file"]
    gridName = example["global"]["griddesc_name"]

    # The path of input directory which stores the GeoTIFF files.
    input_directory = example["global"]["geotiff_dir"]

    # Normalize sectors to a list (f90nml returns a scalar for a single value).
    sectors = example["global"]["sectors"]
    if not isinstance(sectors, list):
        sectors = [sectors]

    # Set the date range for the CMAQ emission files.
    start_date = pd.to_datetime(example["global"]["start_date"])
    end_date = pd.to_datetime(example["global"]["end_date"])

    # Set chemical mechanisms. These parameters select the species file.
    inventory_mechanism = example["global"]["inventory_mechanism"]
    target_mechanism = example["global"]["target_mechanism"]

    # Set and create the output directory (no exists-check race).
    output_directory = f"{os.getcwd()}/output"
    os.makedirs(output_directory, exist_ok=True)

    # Set the parallel cores and validate against the machine.
    cores = example["global"]["cores"]
    num_cores = multiprocessing.cpu_count()
    print("The total core: ", num_cores)
    print("Your set is: ", cores)
    if cores > num_cores:
        print("Please ensure that the number of cores used "
              "does not exceed the maximum number of cores on your computer.")
        exit()

    # Build the argument pool: one task per (date, sector) pair.
    emission_dates = [str(date) for date in pd.period_range(start_date, end_date)]
    arg_pool = [
        (emisfile_date, gridFile, gridName, sector, input_directory,
         inventory_mechanism, target_mechanism, output_directory)
        for emisfile_date in emission_dates
        for sector in sectors
    ]

    # Create the worker pool only after validation, run, then shut it down.
    pool = multiprocessing.Pool(cores)
    results = pool.starmap(source2cmaq, arg_pool)
    pool.close()
    pool.join()

    print("Done")
1577 |
1578 |
def read_allocator(nml_path):
    """Return the allocator_type setting from the &global namelist section.

    :param nml_path: path of the namelist file.
    :return: the allocator type value(s) as stored in the namelist.
    """
    import f90nml
    nml = f90nml.read(nml_path)
    return nml["global"]["allocator_type"]
1584 |
1585 |
def read_line(nml_path):
    """Return the line-source settings from the &line namelist section.

    :param nml_path: path of the namelist file.
    :return: tuple (line_files, line_factors).
    """
    import f90nml
    line_section = f90nml.read(nml_path)["line"]
    return line_section["line_files"], line_section["line_factors"]
1592 |
1593 |
def main_createCMAQ():
    """Entry point: create CMAQ emission files for every (date, sector) pair
    configured in ``namelist.input`` via ``create_emission_file``, using a
    multiprocessing pool.

    :return: None.
    """
    # --------------------------------------------------------------------------------------------------------
    if user_control() is True:
        print("### This system is developed by Haofan Wang. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
    else:
        print("### This system is developed by Haofan Wang. ###")
        print("### You can contact me for any suggestions. ###")
        print("### Email: wanghf58@mail2.sysu.edu.cn ###")
        print("### *************************************************** ###")
        print("### The current version has expired. ###")
        print("### Please contact me to request the latest version. ###")
        print("### *************************************************** ###")
        return
    # --------------------------------------------------------------------------------------------------------
    import multiprocessing
    # --------------------------------------------------------------------------------------------------------
    griddesc_file, griddesc_name, big_grid_file, inventory_year, sectors, geotiff_dir, allocation_raster, \
        inventory_label, inventory_mechanism, target_mechanism, start_date, end_date, cores = read_nml("namelist.input")

    # Normalize sectors to a list (f90nml returns a scalar for a single value).
    if not isinstance(sectors, list):
        sectors = [sectors]

    # Obtain the available core number and validate the user's setting.
    num_cores = multiprocessing.cpu_count()
    print("The total core: ", num_cores)
    print("Your set is: ", cores)
    if cores > num_cores:
        print("Please ensure that the number of cores used "
              "does not exceed the maximum number of cores on your computer.")
        exit()

    # Build the argument pool: one task per (date, sector) pair.
    emission_dates = [str(date) for date in pd.period_range(pd.to_datetime(start_date), pd.to_datetime(end_date))]
    arg_pool = [
        (emisfile_date, griddesc_file, griddesc_name, inventory_label, sector,
         inventory_year, inventory_mechanism, target_mechanism)
        for emisfile_date in emission_dates
        for sector in sectors
    ]

    # Create the worker pool only after validation, run, then shut it down.
    pool = multiprocessing.Pool(cores)
    results = pool.starmap(create_emission_file, arg_pool)
    pool.close()
    pool.join()

    print("Done")
1650 |
--------------------------------------------------------------------------------