├── .gitignore
├── AddNearAtrributesDirections
│   ├── AddNearAttributesDirections-CN-V1.0.py
│   ├── AddNearAttributesDirections-EN-V1.0.py
│   └── AddNearAttributesDirections.tbx
├── CSV2PtsShp
│   ├── CSV2PtsShp.py
│   └── designed_samples.csv
├── DelRepeatFeatures
│   └── DelRepeat.py
├── DownloadModis
│   ├── download_modis_nasa_earthdata.py
│   └── utils.py
├── ExtractMultiValue2Zones
│   ├── ExtMultiVal2Polygon.py
│   ├── ExtractMultiValue2Zones.py
│   └── ExtractMultiValue2Zones.tbx
├── ExtractRasterByMultiPolygon
│   ├── .idea
│   │   ├── ExtractRasterByMultiPolygon.iml
│   │   ├── encodings.xml
│   │   ├── misc.xml
│   │   ├── modules.xml
│   │   └── workspace.xml
│   └── ExtractRasterByMultiPolygon.py
├── HydroDataDownload
│   ├── CreateDatabase_SURF_CLI_CHN_MUL_DAY.py
│   ├── Down_Fujian_RealTimeData_Shuizhi.py
│   ├── Down_Fujian_shuizhi_zhoubao.py
│   ├── GetYcRainSum_20150805-zhulm.py
│   ├── ReadDatabase_SURF_CLI_CHN_MUL_DAY.py
│   ├── Read_hedao20150901 - zhulj.py
│   ├── Read_hedao20150901.py
│   ├── anhui_precipitation_download.py
│   ├── climate_download.py
│   ├── netcdf4_pydap_test.py
│   ├── test.py
│   └── trmm_download.py
├── Hydrograph
│   ├── Hydrograph-Storm.py
│   ├── ObsS.txt
│   ├── prec.txt
│   └── simuS.txt
├── NSGA2
│   ├── .idea
│   │   ├── NSGA2.iml
│   │   ├── encodings.xml
│   │   ├── misc.xml
│   │   ├── modules.xml
│   │   └── workspace.xml
│   ├── deap
│   │   ├── dbf_test.py
│   │   └── demo1.py
│   ├── inspyred
│   │   ├── nsga_example_inspyred.py
│   │   └── parallel_evaluation_pp_example.py
│   ├── nsga_example.py
│   └── parallel_evaluation_pp_example.py
├── README.md
├── RUSLE_LS
│   ├── RUSLE.tbx
│   ├── RUSLE_LS(Tool).py
│   └── RUSLE_LS_4_PC.AML
├── RillChannelExtraction
│   ├── IdentifyRillRidges.py
│   └── RillChannelExtraction.py
├── RillPy
│   ├── Hillslope.py
│   ├── Memo.py
│   ├── README.txt
│   ├── Rill.py
│   ├── ShoulderLine.py
│   ├── Subbasin.py
│   ├── Util.py
│   ├── __init__.py
│   └── main.py
├── SWAT_post_process
│   ├── Read_SWAT_Output_MDB.py
│   ├── Update_SWAT_mdb_from_SWAT_CUP.py
│   ├── __init__.py
│   └── stats_SWAT_Output_mdb.py
├── SWATplusUtility
│   ├── __init__.py
│   └── create_pond_points.py
├── TIN_Hydro
│   ├── HydroTIN.bak
│   ├── HydroTIN.py
│   ├── ShapefileIO.py
│   ├── User manual-zhulj-2016-2-20.docx
│   ├── XYZ2ShpPoint_GDAL.py
│   ├── _project
│   ├── backup
│   │   ├── CGAL-test.py
│   │   ├── fit.py
│   │   └── test.py
│   ├── data
│   │   ├── test.dbf
│   │   ├── test.sbn
│   │   ├── test.sbx
│   │   ├── test.shp
│   │   └── test.shx
│   ├── env
│   │   ├── Config.txt
│   │   ├── x64_python
│   │   │   └── CGAL-Python-0.9.4b1.win-amd64-py2.7.exe
│   │   └── x86_python
│   │       ├── CGAL-Python-0.9.4b1.win32-py2.7.exe
│   │       ├── GDAL-1.11.2-cp27-none-win32.whl
│   │       ├── alglib-3.10.0.cpython.gpl.zip
│   │       ├── pip-7.1.0.tar.gz
│   │       ├── setuptools-18.2.zip
│   │       └── vcredist_x86.exe
│   └── main.py
├── Util
│   ├── GeoTIFF_Converter.py
│   ├── HardenSlpPos_Compare.py
│   ├── Similarity_Compare.py
│   ├── TauDEM.py
│   ├── Util.py
│   ├── __init__.py
│   ├── available_font_matplotlib.py
│   ├── normalize_for_SoLIM_20141110.py
│   ├── pond_preprocess.py
│   ├── rasterCalculator.py
│   └── test_chinese_matplotlib.py
└── test
    ├── DEAP_tutorial.py
    ├── SCOOP_tutorial.py
    ├── TidyZotero.py
    ├── asc2tif.py
    ├── down_ts.py
    ├── down_ts_linux.py
    ├── mongoclient.py
    ├── numpy_test.py
    ├── pyqgis_test.py
    └── uniqueID_scoop.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 |
26 | # PyInstaller
27 | # Usually these files are written by a python script from a template
28 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
29 | *.manifest
30 | *.spec
31 |
32 | # Installer logs
33 | pip-log.txt
34 | pip-delete-this-directory.txt
35 |
36 | # Unit test / coverage reports
37 | htmlcov/
38 | .tox/
39 | .coverage
40 | .coverage.*
41 | .cache
42 | nosetests.xml
43 | coverage.xml
44 | *,cover
45 | .hypothesis/
46 |
47 | # Translations
48 | *.mo
49 | *.pot
50 |
51 | # Django stuff:
52 | *.log
53 |
54 | # Sphinx documentation
55 | # docs/_build/
56 |
57 | # PyBuilder
58 | target/
59 |
60 | #Ipython Notebook
61 | .ipynb_checkpoints
62 |
63 | #PyCharm Project files
64 | .idea/
65 | .idea/*
--------------------------------------------------------------------------------
/AddNearAtrributesDirections/AddNearAttributesDirections-CN-V1.0.py:
--------------------------------------------------------------------------------
1 | import arcpy
2 | from arcpy import env
3 | 
4 | # ---------------------------------------------------------------------------
5 | # AddNearAttributesDirections.py
6 | # Created on: 2013-04-08
7 | # Author: Zhu Liangjun
8 | # ---------------------------------------------------------------------------
9 | 
10 | 
11 | #################### Inputs ########################
12 | def setupNearAttributes():
13 |     poly_shp = arcpy.GetParameterAsText(0)
14 |     nameField = arcpy.GetParameterAsText(1)
15 |     fieldName = arcpy.GetParameterAsText(2)
16 |     fieldLength = arcpy.GetParameterAsText(3)
17 |     isDirection = arcpy.GetParameterAsText(4)
18 |     Direct = arcpy.GetParameterAsText(5)
19 |     outFile = arcpy.GetParameterAsText(6)
20 |     arcpy.gp.overwriteOutput = 1
21 |     shpDesc = arcpy.Describe(poly_shp)
22 |     env.workspace = shpDesc.Path
23 |     if isDirection == 'true':  # boolean tool parameters arrive as the strings 'true'/'false'
24 |         AddNearAttributesDirec(poly_shp, nameField, Direct, outFile, fieldName, fieldLength)
25 |     else:
26 |         AddNearAttributes(poly_shp, nameField, outFile, fieldName, fieldLength)
27 | 
28 | ################### Functions ######################
29 | def sendmsg(msg):
30 |     print msg
31 |     arcpy.AddMessage(msg)
32 | 
33 | def CalFieldMappings(original_shp, join_shp, nameField, fieldmappings, fieldName, Length):
34 |     fieldmappings.addTable(original_shp)
35 |     AddNearPoly = arcpy.FieldMap()
36 |     AddNearPoly.addInputField(join_shp, nameField)
37 |     field = AddNearPoly.outputField
38 |     field.name = fieldName
39 |     field.aliasName = fieldName
40 |     field.length = Length
41 |     AddNearPoly.mergeRule = "Join"
42 |     AddNearPoly.joinDelimiter = ","
43 |     AddNearPoly.outputField = field
44 |     fieldmappings.addFieldMap(AddNearPoly)
45 |     ##sendmsg(fieldmappings.exportToString())
46 | 
47 | def AddNearAttributes(poly_shp, nameField, outFile, fieldName, fieldLength):
48 |     arcpy.Copy_management(poly_shp, "temp_poly.shp")
49 |     fieldmappings = arcpy.FieldMappings()
50 |     CalFieldMappings(poly_shp, "temp_poly.shp", nameField, fieldmappings, fieldName, fieldLength)
51 |     arcpy.SpatialJoin_analysis(poly_shp, "temp_poly.shp", outFile, "JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings, "INTERSECT", "", "")
52 |     arcpy.DeleteField_management(outFile, ["Join_Count", "TARGET_FID"])
53 |     # Strip the polygon's own name from the joined neighbor-name list
54 |     arcpy.CalculateField_management(outFile, fieldName, "Replace([" + fieldName + "],[" + nameField + "]+\",\",\"\")", "VB")
55 |     arcpy.CalculateField_management(outFile, fieldName, "Replace([" + fieldName + "],\",\"+[" + nameField + "],\"\")", "VB")
56 |     arcpy.CalculateField_management(outFile, fieldName, "Replace([" + fieldName + "],[" + nameField + "],\"\")", "VB")
57 |     ##arcpy.CalculateField_management(outFile, "NearPoly", "string.replace(!" + fieldName + "!,!" + nameField + "!+',','')", "PYTHON")
58 |     ##arcpy.CalculateField_management(outFile, "NearPoly", "string.replace(!" + fieldName + "!,','+!" + nameField + "!,'')", "PYTHON")
59 |     ##arcpy.CalculateField_management(outFile, "NearPoly", "string.replace(!" + fieldName + "!,!" + nameField + "!,'')", "PYTHON")
60 |     arcpy.Delete_management("temp_poly.shp")
61 | 
62 | def AddNearAttributesDirec(poly_shp, nameField, Direct, outFile, fieldName, fieldLength):
63 |     ## Define temporary files
64 |     polytopoint_shp = "polytopoint.shp"
65 |     pointneartab = "pointneartab"
66 |     polyneartab = "polyneartab.dbf"
67 | 
68 |     try:
69 |         arcpy.FeatureToPoint_management(poly_shp, polytopoint_shp, "INSIDE")
70 |         arcpy.AddXY_management(polytopoint_shp)
71 |     except:
72 |         sendmsg(arcpy.GetMessages())
73 |     try:
74 |         arcpy.GenerateNearTable_analysis(polytopoint_shp, polytopoint_shp, pointneartab, "", "NO_LOCATION", "ANGLE", "ALL", "0")
75 |         arcpy.GenerateNearTable_analysis(poly_shp, poly_shp, "polyneartabTemp", "0", "NO_LOCATION", "NO_ANGLE", "ALL", "0")
76 |         shpDesc = arcpy.Describe(poly_shp)
77 |         arcpy.TableToTable_conversion("polyneartabTemp", shpDesc.Path, polyneartab)
78 |     except:
79 |         sendmsg(arcpy.GetMessages())
80 |     try:
81 |         arcpy.AddField_management(polyneartab, "near_link", "TEXT")
82 |         arcpy.AddField_management(polyneartab, "NameDirec", "TEXT", "", "", 80, "", "", "", "")
83 |         arcpy.AddField_management(polyneartab, "x", "DOUBLE")
84 |         arcpy.AddField_management(polyneartab, "y", "DOUBLE")
85 |         arcpy.AddField_management(polyneartab, "angle", "DOUBLE")
86 |         # Build a zero-padded IN_FID+NEAR_FID key to link the two near tables
87 |         arcpy.CalculateField_management(polyneartab, "near_link", "'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))", "PYTHON")
88 |         arcpy.AddField_management(pointneartab, "near_link", "TEXT")
89 |         arcpy.CalculateField_management(pointneartab, "near_link", "'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))", "PYTHON")
90 |     except:
91 |         sendmsg(arcpy.GetMessages())
92 |     try:
93 |         arcpy.MakeTableView_management(polyneartab, "polyneartab_View")
94 |         arcpy.AddJoin_management("polyneartab_View", "IN_FID", polytopoint_shp, "ORIG_FID", "KEEP_ALL")
95 |         arcpy.CalculateField_management("polyneartab_View", "X", "!polytopoint.POINT_X!", "PYTHON")
96 |         arcpy.CalculateField_management("polyneartab_View", "Y", "!polytopoint.POINT_Y!", "PYTHON")
97 |         arcpy.RemoveJoin_management("polyneartab_View", "polytopoint")
98 | 
99 |         arcpy.AddJoin_management("polyneartab_View", "NEAR_FID", polytopoint_shp, "ORIG_FID", "KEEP_ALL")
100 |         ##arcpy.CalculateField_management("polyneartab_View", "polyneartab:NameDirec", "!polytopoint." + nameField + "!", "PYTHON")
101 |         arcpy.CalculateField_management("polyneartab_View", "NameDirec", "[polytopoint." + nameField + "]", "VB")
102 |         arcpy.RemoveJoin_management("polyneartab_View", "polytopoint")
103 | 
104 |         arcpy.MakeTableView_management(pointneartab, "pointneartab_View")
105 |         arcpy.AddJoin_management("polyneartab_View", "NEAR_LINK", "pointneartab_View", "NEAR_LINK", "KEEP_ALL")
106 |         arcpy.CalculateField_management("polyneartab_View", "ANGLE", "!pointneartab:NEAR_ANGLE!", "PYTHON")
107 | 
108 |         expression = "DefAngle(float(!angle!),str(!NameDirec!))"  # unused here: the VB code blocks below compute 'temp' directly
109 |         if Direct == "Four":
110 |             codeblock = """if Abs([angle]) <= 45 then
111 |     temp = [NameDirec] + "(东)"
112 | elseif [angle] > 45 and [angle] <= 135 then
113 |     temp = [NameDirec] + "(北)"
114 | elseif Abs([angle]) > 135 then
115 |     temp = [NameDirec] + "(西)"
116 | else
117 |     temp = [NameDirec] + "(南)"
118 | end if"""
119 |         else:
120 |             codeblock = """if Abs([angle]) <= 22.5 then
121 |     temp = [NameDirec] + "(东)"
122 | elseif [angle] > 22.5 and [angle] <= 67.5 then
123 |     temp = [NameDirec] + "(东北)"
124 | elseif [angle] > 67.5 and [angle] <= 112.5 then
125 |     temp = [NameDirec] + "(北)"
126 | elseif [angle] > 112.5 and [angle] <= 157.5 then
127 |     temp = [NameDirec] + "(西北)"
128 | elseif Abs([angle]) > 157.5 then
129 |     temp = [NameDirec] + "(西)"
130 | elseif [angle] > -157.5 and [angle] <= -112.5 then
131 |     temp = [NameDirec] + "(西南)"
132 | elseif [angle] > -112.5 and [angle] <= -67.5 then
133 |     temp = [NameDirec] + "(南)"
134 | else
135 |     temp = [NameDirec] + "(东南)"
136 | end if"""
137 |         arcpy.CalculateField_management(polyneartab, "NameDirec", "temp", "VB", codeblock)
138 |     except:
139 |         sendmsg(arcpy.GetMessages())
140 |     ## Add XY data
141 |     try:
142 |         spatialRef = arcpy.Describe(poly_shp).spatialReference
143 |         arcpy.MakeXYEventLayer_management(polyneartab, "x", "y", "tempLayer", spatialRef)
144 |         arcpy.CopyFeatures_management("tempLayer", "point.shp")
145 |     except:
146 |         sendmsg(arcpy.GetMessages())
147 |     try:
148 |         ## Spatial Join
149 |         fieldmappings = arcpy.FieldMappings()
150 |         CalFieldMappings(poly_shp, "point.shp", "NameDirec", fieldmappings, fieldName, fieldLength)
151 |         arcpy.SpatialJoin_analysis(poly_shp, "point.shp", outFile, "JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings, "CONTAINS", "", "")
152 |         arcpy.DeleteField_management(outFile, ["Join_Count", "TARGET_FID"])
153 |     except:
154 |         sendmsg(arcpy.GetMessages())
155 | 
156 |     ## Delete intermediate data
157 |     try:
158 |         arcpy.Delete_management("polyneartabTemp")
159 |         arcpy.Delete_management(pointneartab)
160 |         arcpy.Delete_management(polyneartab)
161 |         arcpy.Delete_management(polytopoint_shp)
162 |         arcpy.Delete_management("point.shp")
163 |     except:
164 |         sendmsg(arcpy.GetMessages())
165 | 
166 | if __name__ == '__main__':
167 |     setupNearAttributes()
168 | 
--------------------------------------------------------------------------------
/AddNearAtrributesDirections/AddNearAttributesDirections-EN-V1.0.py:
--------------------------------------------------------------------------------
1 | import arcpy
2 | from arcpy import env
3 | ##import string,sys
4 | ##reload(sys)
5 | ##sys.setdefaultencoding('utf8')
6 | # ---------------------------------------------------------------------------
7 | # AddNearAttributesDirections.py
8 | # Created on: 2013-04-08
9 | # Author: Zhu Liangjun
10 | # ---------------------------------------------------------------------------
11 | #################### Inputs ########################
12 | def setupNearAttributes():
13 |     poly_shp = arcpy.GetParameterAsText(0)
14 |     nameField = arcpy.GetParameterAsText(1)
15 |     fieldLength = arcpy.GetParameterAsText(2)
16 |     isDirection = arcpy.GetParameterAsText(3)
17 |     Direct = arcpy.GetParameterAsText(4)
18 |     outFile = arcpy.GetParameterAsText(5)
19 |     arcpy.gp.overwriteOutput = 1
20 |     shpDesc = arcpy.Describe(poly_shp)
21 |     env.workspace = shpDesc.Path
22 |     if isDirection == 'true':  # boolean tool parameters arrive as the strings 'true'/'false'
23 |         AddNearAttributesDirec(poly_shp, nameField, Direct, outFile, fieldLength)
24 |     else:
25 |         AddNearAttributes(poly_shp, nameField, outFile, fieldLength)
26 | 
27 | ################### Functions ######################
28 | def sendmsg(msg):
29 |     print msg
30 |     arcpy.AddMessage(msg)
31 | 
32 | def CalFieldMappings(original_shp, join_shp, nameField, fieldmappings, Length):
33 |     fieldmappings.addTable(original_shp)
34 |     AddNearPoly = arcpy.FieldMap()
35 |     AddNearPoly.addInputField(join_shp, nameField)
36 |     field = AddNearPoly.outputField
37 |     field.name = "NearPoly"
38 |     field.aliasName = "NearPoly"
39 |     field.length = Length
40 |     AddNearPoly.mergeRule = "Join"
41 |     AddNearPoly.joinDelimiter = ","
42 |     AddNearPoly.outputField = field
43 |     fieldmappings.addFieldMap(AddNearPoly)
44 |     ##sendmsg(fieldmappings.exportToString())
45 | 
46 | def AddNearAttributes(poly_shp, nameField, outFile, fieldLength):
47 |     arcpy.Copy_management(poly_shp, "temp_poly.shp")
48 |     fieldmappings = arcpy.FieldMappings()
49 |     CalFieldMappings(poly_shp, "temp_poly.shp", nameField, fieldmappings, fieldLength)
50 |     arcpy.SpatialJoin_analysis(poly_shp, "temp_poly.shp", outFile, "JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings, "INTERSECT", "", "")
51 |     arcpy.DeleteField_management(outFile, ["Join_Count", "TARGET_FID"])
52 |     # Strip the polygon's own name from the joined neighbor-name list
53 |     arcpy.CalculateField_management(outFile, "NearPoly", "Replace([NearPoly],[" + nameField + "]+\",\",\"\")", "VB")
54 |     arcpy.CalculateField_management(outFile, "NearPoly", "Replace([NearPoly],\",\"+[" + nameField + "],\"\")", "VB")
55 |     arcpy.CalculateField_management(outFile, "NearPoly", "Replace([NearPoly],[" + nameField + "],\"\")", "VB")
56 |     ##arcpy.CalculateField_management(outFile, "NearPoly", "string.replace(!NearPoly!,!" + nameField + "!+',','')", "PYTHON")
57 |     ##arcpy.CalculateField_management(outFile, "NearPoly", "string.replace(!NearPoly!,','+!" + nameField + "!,'')", "PYTHON")
58 |     ##arcpy.CalculateField_management(outFile, "NearPoly", "string.replace(!NearPoly!,!" + nameField + "!,'')", "PYTHON")
59 |     arcpy.Delete_management("temp_poly.shp")
60 | 
61 | def AddNearAttributesDirec(poly_shp, nameField, Direct, outFile, fieldLength):
62 |     ## Define temporary files
63 |     polytopoint_shp = "polytopoint.shp"
64 |     pointneartab = "pointneartab"
65 |     polyneartab = "polyneartab.dbf"
66 | 
67 |     try:
68 |         arcpy.FeatureToPoint_management(poly_shp, polytopoint_shp, "INSIDE")
69 |         arcpy.AddXY_management(polytopoint_shp)
70 |     except:
71 |         sendmsg(arcpy.GetMessages())
72 |     try:
73 |         arcpy.GenerateNearTable_analysis(polytopoint_shp, polytopoint_shp, pointneartab, "", "NO_LOCATION", "ANGLE", "ALL", "0")
74 |         arcpy.GenerateNearTable_analysis(poly_shp, poly_shp, "polyneartabTemp", "0", "NO_LOCATION", "NO_ANGLE", "ALL", "0")
75 |         shpDesc = arcpy.Describe(poly_shp)
76 |         arcpy.TableToTable_conversion("polyneartabTemp", shpDesc.Path, polyneartab)
77 |     except:
78 |         sendmsg(arcpy.GetMessages())
79 |     try:
80 |         arcpy.AddField_management(polyneartab, "near_link", "TEXT")
81 |         arcpy.AddField_management(polyneartab, "NameDirec", "TEXT", "", "", 80, "", "", "", "")
82 |         arcpy.AddField_management(polyneartab, "x", "DOUBLE")
83 |         arcpy.AddField_management(polyneartab, "y", "DOUBLE")
84 |         arcpy.AddField_management(polyneartab, "angle", "DOUBLE")
85 |         # Build a zero-padded IN_FID+NEAR_FID key to link the two near tables
86 |         arcpy.CalculateField_management(polyneartab, "near_link", "'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))", "PYTHON")
87 |         arcpy.AddField_management(pointneartab, "near_link", "TEXT")
88 |         arcpy.CalculateField_management(pointneartab, "near_link", "'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))", "PYTHON")
89 |     except:
90 |         sendmsg(arcpy.GetMessages())
91 |     try:
92 |         arcpy.MakeTableView_management(polyneartab, "polyneartab_View")
93 |         arcpy.AddJoin_management("polyneartab_View", "IN_FID", polytopoint_shp, "ORIG_FID", "KEEP_ALL")
94 |         arcpy.CalculateField_management("polyneartab_View", "X", "!polytopoint.POINT_X!", "PYTHON")
95 |         arcpy.CalculateField_management("polyneartab_View", "Y", "!polytopoint.POINT_Y!", "PYTHON")
96 |         arcpy.RemoveJoin_management("polyneartab_View", "polytopoint")
97 | 
98 |         arcpy.AddJoin_management("polyneartab_View", "NEAR_FID", polytopoint_shp, "ORIG_FID", "KEEP_ALL")
99 |         ##arcpy.CalculateField_management("polyneartab_View", "polyneartab:NameDirec", "!polytopoint." + nameField + "!", "PYTHON")
100 |         arcpy.CalculateField_management("polyneartab_View", "NameDirec", "[polytopoint." + nameField + "]", "VB")
101 |         arcpy.RemoveJoin_management("polyneartab_View", "polytopoint")
102 | 
103 |         arcpy.MakeTableView_management(pointneartab, "pointneartab_View")
104 |         arcpy.AddJoin_management("polyneartab_View", "NEAR_LINK", "pointneartab_View", "NEAR_LINK", "KEEP_ALL")
105 |         arcpy.CalculateField_management("polyneartab_View", "ANGLE", "!pointneartab:NEAR_ANGLE!", "PYTHON")
106 | 
107 |         # Classify the near angle into four or eight compass directions
108 |         expression = "DefAngle(float(!angle!),str(!NameDirec!))"
109 |         if Direct == "Four":
110 |             codeblock = """def DefAngle(angle, name):
111 |     if abs(angle) <= 45:
112 |         return name + '(East)'
113 |     elif angle > 45 and angle <= 135:
114 |         return name + '(North)'
115 |     elif abs(angle) > 135:
116 |         return name + '(West)'
117 |     else:
118 |         return name + '(South)'"""
119 |         else:
120 |             codeblock = """def DefAngle(angle, name):
121 |     if abs(angle) <= 22.5:
122 |         return name + '(East)'
123 |     elif angle > 22.5 and angle <= 67.5:
124 |         return name + '(NorthEast)'
125 |     elif angle > 67.5 and angle <= 112.5:
126 |         return name + '(North)'
127 |     elif angle > 112.5 and angle <= 157.5:
128 |         return name + '(NorthWest)'
129 |     elif abs(angle) > 157.5:
130 |         return name + '(West)'
131 |     elif angle > -157.5 and angle <= -112.5:
132 |         return name + '(SouthWest)'
133 |     elif angle > -112.5 and angle <= -67.5:
134 |         return name + '(South)'
135 |     else:
136 |         return name + '(SouthEast)'"""
137 |         ##codeblock = (codeblock.decode('utf-8')).encode('gb2312')
138 |         arcpy.CalculateField_management(polyneartab, "NameDirec", expression, "PYTHON", codeblock)
139 |     except:
140 |         sendmsg(arcpy.GetMessages())
141 |     ## Add XY data
142 |     try:
143 |         spatialRef = arcpy.Describe(poly_shp).spatialReference
144 |         arcpy.MakeXYEventLayer_management(polyneartab, "x", "y", "tempLayer", spatialRef)
145 |         arcpy.CopyFeatures_management("tempLayer", "point.shp")
146 |     except:
147 |         sendmsg(arcpy.GetMessages())
148 |     try:
149 |         ## Spatial Join
150 |         fieldmappings = arcpy.FieldMappings()
151 |         CalFieldMappings(poly_shp, "point.shp", "NameDirec", fieldmappings, fieldLength)
152 |         arcpy.SpatialJoin_analysis(poly_shp, "point.shp", outFile, "JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings, "CONTAINS", "", "")
153 |         arcpy.DeleteField_management(outFile, ["Join_Count", "TARGET_FID"])
154 |     except:
155 |         sendmsg(arcpy.GetMessages())
156 | 
157 |     ## Delete intermediate data
158 |     try:
159 |         arcpy.Delete_management("polyneartabTemp")
160 |         arcpy.Delete_management(pointneartab)
161 |         arcpy.Delete_management(polyneartab)
162 |         arcpy.Delete_management(polytopoint_shp)
163 |         arcpy.Delete_management("point.shp")
164 |     except:
165 |         sendmsg(arcpy.GetMessages())
166 | 
167 | if __name__ == '__main__':
168 |     setupNearAttributes()
169 | 
--------------------------------------------------------------------------------
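Note: the DefAngle code block above classifies the planar near angle reported by Generate Near Table (degrees in [-180, 180], 0 pointing east, 90 pointing north) through chained comparisons. Below is a standalone, arcpy-free sketch of the same eight-sector classification using modular arithmetic; ties at exact sector edges may fall on the neighboring sector compared with the chained version.

def classify_direction(angle, name):
    """Map a planar near angle to one of eight compass sectors.

    Assumes the ArcGIS near-angle convention: degrees in [-180, 180],
    0 = east, 90 = north, measured counterclockwise.
    """
    labels = ['East', 'NorthEast', 'North', 'NorthWest',
              'West', 'SouthWest', 'South', 'SouthEast']
    # Shift by half a sector (22.5 degrees) so each label spans 45 degrees,
    # then wrap negative angles into [0, 360) before picking the sector.
    sector = int(((angle + 22.5) % 360) // 45)
    return '%s(%s)' % (name, labels[sector])

print(classify_direction(50.0, 'A'))    # -> A(NorthEast)
print(classify_direction(-100.0, 'B'))  # -> B(South)
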
/AddNearAtrributesDirections/AddNearAttributesDirections.tbx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/AddNearAtrributesDirections/AddNearAttributesDirections.tbx
--------------------------------------------------------------------------------
/CSV2PtsShp/CSV2PtsShp.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | ## Author : Liangjun Zhu
3 | ## Email : crazyzlj@gmail.com
4 | ## Date : 2015-1-23
5 | ## Usage : Convert a CSV file of point records to a point shapefile.
6 | ##         Put this .py file in the same folder as the CSV, and set the
7 | ##         file name and the x/y column names below.
8 | import os, sys
9 | import arcpy
10 | from arcpy import env
11 | 
12 | def currentPath():
13 |     path = sys.path[0]
14 |     if os.path.isdir(path):
15 |         return path
16 |     elif os.path.isfile(path):
17 |         return os.path.dirname(path)
18 | 
19 | def CSV2PtsShp(CSVFile, X, Y):
20 |     env.workspace = os.path.dirname(CSVFile)
21 |     PtsShp = os.path.basename(CSVFile)
22 |     PtsShp = PtsShp.split('.')[-2] + ".shp"
23 |     print PtsShp
24 |     try:
25 |         arcpy.MakeXYEventLayer_management(CSVFile, X, Y, "tempLayer", "", "")
26 |         arcpy.CopyFeatures_management("tempLayer", PtsShp)
27 |     except:
28 |         print arcpy.GetMessages()
29 |         arcpy.AddMessage(arcpy.GetMessages())
30 | 
31 |     print os.path.dirname(CSVFile)
32 |     print "%s converted to shapefile successfully!" % CSVFile
33 | 
34 | if __name__ == '__main__':
35 |     CSVName = "designed_samples.csv"
36 |     XName = "RecommendedX"
37 |     YName = "RecommendedY"
38 |     currFolder = currentPath()
39 |     CSVFile = currFolder + os.sep + CSVName
40 |     CSV2PtsShp(CSVFile, XName, YName)
41 | 
--------------------------------------------------------------------------------
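Note: where arcpy is unavailable, the same CSV-to-point-shapefile conversion can be sketched with the GDAL/OGR Python bindings. This is an assumption-laden alternative, not part of the repository: it requires the osgeo package, writes every attribute as a string for simplicity, and the shapefile driver will truncate field names longer than 10 characters (e.g. RecommendedX).

import csv
from osgeo import ogr

def csv2shp_ogr(csvfile, xname, yname, shpfile):
    # Create the output shapefile and a point layer
    driver = ogr.GetDriverByName('ESRI Shapefile')
    ds = driver.CreateDataSource(shpfile)
    layer = ds.CreateLayer('points', geom_type=ogr.wkbPoint)
    with open(csvfile) as f:
        reader = csv.DictReader(f)
        for name in reader.fieldnames:  # all fields as strings, for simplicity
            layer.CreateField(ogr.FieldDefn(name, ogr.OFTString))
        for row in reader:
            feat = ogr.Feature(layer.GetLayerDefn())
            feat.SetGeometry(ogr.CreateGeometryFromWkt(
                'POINT (%s %s)' % (row[xname], row[yname])))
            for i, name in enumerate(reader.fieldnames):
                feat.SetField(i, row[name])  # set by index, since names may be truncated
            layer.CreateFeature(feat)
            feat = None
    ds = None  # closing the datasource flushes everything to disk

csv2shp_ogr('designed_samples.csv', 'RecommendedX', 'RecommendedY',
            'designed_samples.shp')
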
/CSV2PtsShp/designed_samples.csv:
--------------------------------------------------------------------------------
1 | Stability,PatternID,TotalArea,RecommendedX,RecommendedY,Ave.Membership
2 | 3,1,3102,431103.875000,3488299.000000,0.987639
3 | 3,1,3102,429103.875000,3469299.000000,0.986404
4 | 3,1,3102,697103.875000,3415299.000000,0.986380
5 | 3,2,996,437103.875000,3472299.000000,0.996329
6 | 3,2,996,466103.875000,3458299.000000,0.993498
7 | 3,2,996,623103.875000,3619299.000000,0.993234
8 | 3,5,768,616103.875000,3338299.000000,0.964042
9 | 3,5,768,415103.875000,3410299.000000,0.962403
10 | 3,5,768,390103.875000,3450299.000000,0.961458
11 | 3,4,376,728103.875000,3408299.000000,0.866977
12 | 3,4,376,363103.875000,3502299.000000,0.866923
13 | 3,4,376,587103.875000,3379299.000000,0.863143
14 | 3,3,150,420103.875000,3494299.000000,0.830402
15 | 3,3,150,361103.875000,3468299.000000,0.829132
16 | 3,3,150,425103.875000,3490299.000000,0.827112
17 | 3,6,8,450103.875000,3450299.000000,0.810822
18 | 3,6,8,401103.875000,3449299.000000,0.807702
19 | 3,6,8,389103.875000,3402299.000000,0.807020
--------------------------------------------------------------------------------
/DelRepeatFeatures/DelRepeat.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf8 -*-
2 | #-------------------------------------------------------------------------------
3 | # Name: DelRepeat
4 | # Description: Delete duplicated (geometrically equal) features from a feature class
5 | #
6 | # Created: 2013-3-29
7 | # Author: gjl
8 | # Contact: gjl
9 | #-------------------------------------------------------------------------------
10 | import arcpy
11 | 
12 | def DelRepeat(inputFeatureClass, outputFeatureClass):
13 |     # copy features
14 |     fc = outputFeatureClass
15 |     arcpy.CopyFeatures_management(inputFeatureClass, fc)
16 | 
17 |     # assign a unique id and a 'visited' mark to every feature
18 |     if len(arcpy.ListFields(fc, "TempId")) <= 0:
19 |         arcpy.AddField_management(fc, "TempId", "LONG")
20 |     if len(arcpy.ListFields(fc, "TempMark")) <= 0:
21 |         arcpy.AddField_management(fc, "TempMark", "TEXT")  # "TEXT" is the valid AddField type ("STRING" is not)
22 |     num = 1
23 |     cursor = arcpy.UpdateCursor(fc)
24 |     for row in cursor:
25 |         row.TempId = num
26 |         row.TempMark = "N"
27 |         num += 1
28 |         cursor.updateRow(row)
29 |     del row
30 |     del cursor
31 | 
32 |     # find duplicated polygons (pairwise geometry comparison, O(n^2))
33 |     repeat = []
34 |     cursor1 = arcpy.SearchCursor(fc)
35 |     for row1 in cursor1:
36 |         if row1.TempMark == "N":
37 |             geom1 = row1.shape
38 |             cursor2 = arcpy.UpdateCursor(fc)
39 |             for row2 in cursor2:
40 |                 geom2 = row2.shape
41 |                 if geom1.equals(geom2) and (row2.TempId != row1.TempId):
42 |                     row2.TempMark = "Y"
43 |                     repeat.append(row2.TempId)
44 |                     cursor2.updateRow(row2)
45 |             del row2
46 |             del cursor2
47 |     del row1
48 |     del cursor1
49 |     print repeat
50 | 
51 |     # delete duplicated polygons (skip the selection if none were found)
52 |     if repeat:
53 |         arcpy.MakeFeatureLayer_management(fc, "layer")
54 |         sql = ""
55 |         for r in repeat:
56 |             if sql == "":
57 |                 sql += "\"TempId\" = " + str(r)
58 |             else:
59 |                 sql += " OR \"TempId\" = " + str(r)
60 |         print sql
61 |         arcpy.SelectLayerByAttribute_management("layer", "NEW_SELECTION", sql)
62 |         arcpy.DeleteFeatures_management("layer")
63 |     arcpy.DeleteField_management(fc, "TempId")
64 |     arcpy.DeleteField_management(fc, "TempMark")
65 | 
66 | def main():
67 |     inFC = arcpy.GetParameterAsText(0)
68 |     outFC = arcpy.GetParameterAsText(1)
69 |     DelRepeat(inFC, outFC)
70 | 
71 | if __name__ == "__main__":
72 |     main()
--------------------------------------------------------------------------------
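Note: the nested cursors above compare every pair of geometries, which is O(n^2) and slow on large feature classes. When the goal is exact duplicates, hashing a canonical text form such as WKT finds them in one pass. A minimal pure-Python sketch (a hypothetical helper, not part of the repository; WKT equality is stricter than geometry.equals(), since vertex order and start vertex must match):

def find_duplicates(features):
    """Return ids of features whose WKT was already seen.

    'features' is any iterable of (id, wkt) pairs; with arcpy this could be
    fed from a SearchCursor over ('OID@', 'SHAPE@WKT').
    """
    seen = {}
    dupes = []
    for fid, wkt in features:
        if wkt in seen:
            dupes.append(fid)  # keep the first occurrence, mark the rest
        else:
            seen[wkt] = fid
    return dupes

print(find_duplicates([(1, 'POINT (0 0)'),
                       (2, 'POINT (1 1)'),
                       (3, 'POINT (0 0)')]))  # -> [3]
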
/DownloadModis/download_modis_nasa_earthdata.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Author: Liangjun Zhu
3 | # Date : 2017-3-13
4 | # Email : zlj@lreis.ac.cn
5 | # Blog : zhulj.net
6 | 
7 | from utils import *
8 | import urllib2
9 | from bs4 import BeautifulSoup
10 | 
11 | import ssl
12 | from functools import wraps
13 | 
14 | 
15 | def sslwrap(func):
16 |     @wraps(func)
17 |     def bar(*args, **kw):
18 |         kw['ssl_version'] = ssl.PROTOCOL_TLSv1
19 |         return func(*args, **kw)
20 | 
21 |     return bar
22 | 
23 | 
24 | # Force TLSv1 for old Python 2 ssl stacks
25 | ssl.wrap_socket = sslwrap(ssl.wrap_socket)
26 | 
27 | 
28 | def chunk_report(mbytes_so_far, total_size):
29 |     if total_size > 0:
30 |         percent = float(mbytes_so_far) / total_size
31 |         percent = round(percent * 100, 2)
32 |         sys.stdout.write("Downloaded %.3f of %.3f MB (%0.2f%%)\r" %
33 |                          (mbytes_so_far, total_size, percent))
34 |         if mbytes_so_far >= total_size:
35 |             sys.stdout.write('\n')
36 |     else:
37 |         pass  # currently, do nothing
38 | 
39 | 
40 | def chunk_read(response, chunk_size=8192, savepath=None, report_hook=None):
41 |     try:
42 |         total_size = response.info().getheader('content-length').strip()
43 |         total_size = float(total_size) / 1024. / 1024.  # bytes -> MB
44 |     except AttributeError:
45 |         total_size = 0.
46 |     mbytes_so_far = 0
47 | 
48 |     while True:
49 |         chunk = response.read(chunk_size)
50 |         mbytes_so_far += len(chunk) / 1024. / 1024.
51 |         if not chunk:
52 |             break
53 |         if savepath is not None:
54 |             savedata2file(chunk, savepath)
55 |         if report_hook:
56 |             report_hook(mbytes_so_far, total_size)
57 |     return mbytes_so_far
58 | 
59 | 
60 | def downMODISfromNASAEarthdata(productname, **kwargs):
61 |     from cookielib import CookieJar
62 |     downUrl = 'https://e4ftl01.cr.usgs.gov/MOLT/'
63 |     prefix = productname.split('.')[0]
64 |     version = productname.split('.')[1]
65 |     usrname = ''
66 |     pwd = ''
67 |     startdate = datetime.datetime.today()
68 |     enddate = datetime.datetime.today()
69 |     h = 0
70 |     v = 8
71 |     deltaday = 8
72 |     outpath = ''
73 |     # try to get the required key-values, or throw exception
74 |     try:
75 |         usrname = kwargs["usrname"]
76 |         pwd = kwargs["pwd"]
77 |         startdate = kwargs["startdate"]
78 |         enddate = kwargs["enddate"]
79 |         h = kwargs["h"]
80 |         v = kwargs["v"]
81 |         deltaday = kwargs["deltaday"]
82 |         outpath = kwargs["workspace"]
83 |     except KeyError:
84 |         print ("downMODISfromNASAEarthdata function must have the usrname, pwd, startdate, and enddate args.")
85 |     # try to get optional key-values
86 |     logfile = None
87 |     if 'log' in kwargs.keys():
88 |         logfile = kwargs['log']
89 |         delfile(logfile)
90 | 
91 |     authorizeUrl = "https://urs.earthdata.nasa.gov"
92 |     # Create a password manager to deal with the 401 response that is returned from authorizeUrl
93 |     password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
94 |     password_manager.add_password(None, authorizeUrl, usrname, pwd)
95 |     cookie_jar = CookieJar()
96 |     opener = urllib2.build_opener(
97 |         urllib2.HTTPBasicAuthHandler(password_manager),
98 |         # urllib2.HTTPHandler(debuglevel=1),   # Uncomment these two lines to see
99 |         # urllib2.HTTPSHandler(debuglevel=1),  # details of the requests/responses
100 |         urllib2.HTTPCookieProcessor(cookie_jar))
101 |     urllib2.install_opener(opener)
102 | 
103 |     tmpdate = startdate
104 |     while tmpdate <= enddate:
105 |         curdownUrl = downUrl + productname + '/' + tmpdate.strftime("%Y.%m.%d") + '/'
106 |         print curdownUrl
107 |         itemsList = read_url(curdownUrl)
108 |         curItem = prefix + '.A%d%03d.h%02dv%02d.' % (tmpdate.year, doy(tmpdate), h, v) + version
109 |         found, curItemUrls = locateStringInList(curItem, itemsList)
110 |         if not found:
111 |             print ("File %s not found!" % curItem)
112 |             # no 'continue' here: curItemUrls is empty anyway, and tmpdate
113 |             # must still be incremented below to avoid an endless loop
114 |         for curItemUrl in curItemUrls:
115 |             tmpfile = outpath + os.sep + os.path.split(curItemUrl)[1]
116 |             delfile(tmpfile)
117 |             try:
118 |                 print2log(curItemUrl, logfile=logfile)
119 |                 request = urllib2.Request(curItemUrl)
120 |                 response = urllib2.urlopen(request)
121 |                 chunk_read(response, savepath=tmpfile, report_hook=chunk_report)
122 |             except (urllib2.HTTPError, urllib2.URLError), e:  # 'except A or B' only caught A
123 |                 print e
124 |         tmpdate += datetime.timedelta(days=deltaday)
125 | 
126 | 
127 | def read_url(url):
128 |     url = url.replace(" ", "%20")
129 |     try:
130 |         req = urllib2.Request(url)
131 |         a = urllib2.urlopen(req).read()
132 |         soup = BeautifulSoup(a, 'html.parser')
133 |         x = (soup.find_all('a'))
134 |         allurl = []
135 |         for i in x:
136 |             file_name = i.extract().get_text()
137 |             url_new = url + file_name
138 |             url_new = url_new.replace(" ", "%20")
139 |             allurl.append(url_new)
140 |         return allurl
141 |     except (urllib2.HTTPError, urllib2.URLError), e:
142 |         print e
143 | 
144 | 
145 | if __name__ == '__main__':
146 |     DOWN_PATH = r'D:\tmp'
147 |     product = "MOD15A2H.006"
148 |     usrname = 'your_user_name'
149 |     pwd = 'your_password'
150 |     startdate = [2002, 2, 18]  # year, month, day
151 |     enddate = [2002, 3, 5]
152 |     deltaday = 8
153 |     h = 1
154 |     v = 11
155 |     log = DOWN_PATH + os.sep + product + '.log'
156 |     downMODISfromNASAEarthdata(product, usrname=usrname, pwd=pwd,
157 |                                startdate=list2datetime(startdate),
158 |                                enddate=list2datetime(enddate),
159 |                                deltaday=deltaday, h=h, v=v,
160 |                                workspace=DOWN_PATH, log=log)
161 | 
--------------------------------------------------------------------------------
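Note: the granule name assembled in downMODISfromNASAEarthdata follows the MODIS naming pattern <product>.A<year><day-of-year>.h<HH>v<VV>.<version>. A small self-contained check of that pattern:

import datetime

def modis_item(productname, date, h, v):
    # e.g. 'MOD15A2H.006' + 2002-02-18 + h01 v11 -> 'MOD15A2H.A2002049.h01v11.006'
    prefix, version = productname.split('.')
    day_of_year = date.timetuple().tm_yday
    return '%s.A%d%03d.h%02dv%02d.%s' % (prefix, date.year, day_of_year, h, v, version)

print(modis_item('MOD15A2H.006', datetime.date(2002, 2, 18), 1, 11))
# -> MOD15A2H.A2002049.h01v11.006
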
/DownloadModis/utils.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Author: Liangjun Zhu
3 | # Date : 2017-1-11
4 | # Email : zlj@lreis.ac.cn
5 | # Blog : zhulj.net
6 | 
7 | import os
8 | import sys
9 | import time
10 | import datetime
11 | import subprocess
12 | 
13 | 
14 | def currentPath():
15 |     path = sys.path[0]
16 |     if os.path.isdir(path):
17 |         return path
18 |     elif os.path.isfile(path):
19 |         return os.path.dirname(path)
20 | 
21 | 
22 | def mkdir(dirname):
23 |     if not os.path.isdir(dirname):
24 |         os.mkdir(dirname)
25 | 
26 | 
27 | def savedata2file(data, filepath):
28 |     with open(filepath, "ab") as code:
29 |         code.write(data)
30 | 
31 | 
32 | def StringMatch(str1, str2):
33 |     if str1.lower() == str2.lower():
34 |         return True
35 |     else:
36 |         return False
37 | 
38 | 
39 | def list2datetime(datelist):
40 |     try:
41 |         if len(datelist) == 1:
42 |             return datetime.datetime(datelist[0])
43 |         elif len(datelist) == 2:
44 |             return datetime.datetime(datelist[0], datelist[1])
45 |         elif len(datelist) == 3:
46 |             return datetime.datetime(datelist[0], datelist[1], datelist[2])
47 |         elif len(datelist) == 4:
48 |             return datetime.datetime(datelist[0], datelist[1], datelist[2], datelist[3])
49 |         elif len(datelist) == 5:
50 |             return datetime.datetime(datelist[0], datelist[1], datelist[2], datelist[3], datelist[4])
51 |     except TypeError:
52 |         print ("Invalid inputs for datetime!")
53 | 
54 | 
55 | def isfileexist(filepath):
56 |     if os.path.exists(filepath) and os.path.isfile(filepath):
57 |         return True
58 |     else:
59 |         return False
60 | 
61 | 
62 | def delfile(filepath):
63 |     if isfileexist(filepath):
64 |         os.remove(filepath)
65 | 
66 | 
67 | def IsLeapYear(year):
68 |     if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
69 |         return True
70 |     else:
71 |         return False
72 | 
73 | 
74 | def GetDayNumber(year, month):
75 |     if month in [1, 3, 5, 7, 8, 10, 12]:
76 |         return 31
77 |     elif month in [4, 6, 9, 11]:
78 |         return 30
79 |     elif IsLeapYear(year):
80 |         return 29
81 |     else:
82 |         return 28
83 | 
84 | 
85 | def doy(dt):
86 |     sec = time.mktime(dt.timetuple())
87 |     t = time.localtime(sec)
88 |     return t.tm_yday
89 | 
90 | 
91 | def print2log(msg, print2screen=True, logfile=None):
92 |     if logfile is not None:
93 |         f = open(logfile, 'a')
94 |         f.write(msg)
95 |         f.close()
96 |     if print2screen:
97 |         print (msg)
98 | 
99 | 
100 | def isnumerical(x):
101 |     try:
102 |         xx = float(x)
103 |     except TypeError:
104 |         return False
105 |     except ValueError:
106 |         return False
107 |     except Exception:  # was "except 'Exception':", a string that never matches
108 |         return False
109 |     else:
110 |         return True
111 | 
112 | 
113 | def runcommand(commands):
114 |     """
115 |     Execute external command, and return the output lines list
116 |     :param commands: string or list
117 |     :return: output lines
118 |     """
119 |     print (commands)
120 |     use_shell = True
121 |     if isinstance(commands, list) or isinstance(commands, tuple):
122 |         use_shell = False
123 |     process = subprocess.Popen(commands, shell=use_shell, stdout=subprocess.PIPE, stdin=open(os.devnull),
124 |                                stderr=subprocess.STDOUT, universal_newlines=True)
125 |     return process.stdout.readlines()
126 | 
127 | 
128 | def zipfiles(filenames, zip_file):
129 |     commands = ['python', '-m', 'zipfile', '-c', zip_file]
130 |     if filenames:
131 |         for filename in filenames:
132 |             commands.append(filename)
133 |     runcommand(commands)
134 | 
135 | 
136 | def locateStringInList(substr, strlist):
137 |     found = False
138 |     foundstr = []
139 |     for tmpstr in strlist:
140 |         if substr in tmpstr:
141 |             found = True
142 |             foundstr.append(tmpstr)
143 |     return found, foundstr
144 | 
--------------------------------------------------------------------------------
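Note: the chained length checks in list2datetime are essentially equivalent to argument unpacking, since datetime.datetime raises the same TypeError for too few or badly typed arguments. A compact sketch:

import datetime

def list2datetime_compact(datelist):
    # [year, month, day, hour, minute] with 1-5 elements
    try:
        return datetime.datetime(*datelist)
    except TypeError:
        print("Invalid inputs for datetime!")

print(list2datetime_compact([2002, 2, 18]))  # -> 2002-02-18 00:00:00
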
/ExtractMultiValue2Zones/ExtMultiVal2Polygon.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/ExtractMultiValue2Zones/ExtMultiVal2Polygon.py
--------------------------------------------------------------------------------
/ExtractMultiValue2Zones/ExtractMultiValue2Zones.py:
--------------------------------------------------------------------------------
1 | """
2 | Tool Name: MultiValue2Zones
3 | Source Code: MultiValue2Zones.py
4 | Version: v1.0 based on ArcGIS 10.x
5 | Author: Liangjun Zhu
6 | Contact: crazyzlj@gmail.com
7 | Start Date: 2012/12/14
8 | Revised Date : 2015/1/16
9 |
10 | This script will statistic the value of given rasters within the
11 | zones of another polygon shapefile and report the results to a
12 | CSV file.
13 | This script can calculate values included "MEAN","MAJORITY",
14 | "MAXIMUM","MEDIAN","MINIMUM","MINORITY","RANGE","STD","SUM",
15 | "VARIETY". Each raster's value will be appended to the origin
16 | shapefile's attribute table and named by the corresponding
17 | raster's name.
18 | """
19 | ################### Imports ########################
20 | import os,sys,arcpy,string
21 | from arcpy.sa import *
22 | from arcpy.management import *
23 | from arcpy import env
24 |
25 | #################### Inputs ########################
26 | def currentPath():
27 | path = sys.path[0]
28 | if os.path.isdir(path):
29 | return path
30 | elif os.path.isfile(path):
31 | return os.path.dirname(path)
32 |
33 | def setupMultiVal2Poly():
34 |
35 | ## The default set is: 1. DEM in workspace\\DEM; 2. params layers in workspace\\params;
36 | ## 3. Reclassify DEM for statistics zones ##
37 |
38 | demfolder = currentPath() + "\\DEM"
39 | paramfolder = currentPath() + "\\params"
40 | resultfolder = currentPath() + "\\results"
41 | #print resultfolder
42 | if not os.path.exists(resultfolder):
43 | os.mkdir(resultfolder)
44 | if os.path.exists(demfolder):
45 | arcpy.env.workspace = demfolder
46 | else:
47 | print "Please make a 'DEM' folder which contains the DEM file."
48 | raw_input()
49 | exit()
50 | arcpy.gp.overwriteOutput = 1
51 | arcpy.CheckOutExtension("Spatial")
52 | if arcpy.ListRasters("*","ALL") == []:
53 | print "Please check the DEM folder to make sure the existence of DEM raster file."
54 | raw_input()
55 | exit()
56 | else:
57 | cls = range(0,8100,100)
58 | classifyIdx = []
59 | for i in range(len(cls)-1):
60 | classifyIdx.append([cls[i],cls[i+1],i])
61 | #classifyIdx.append([7000,8000,len(cls)])
62 | print "The reclassification of DEM is :"
63 | print classifyIdx
64 |
65 | for DEMfile in arcpy.ListRasters("*","ALL"):
66 | print "Reclassify the DEM raster..."
67 | outReclass = Reclassify(DEMfile, "Value",RemapRange(classifyIdx))
68 | DEMcls = resultfolder + "\\DEMcls"
69 | outReclass.save(DEMcls)
70 | DEMclsShp = resultfolder + "\\DEMcls.shp"
71 | arcpy.RasterToPolygon_conversion(DEMcls, DEMclsShp, "NO_SIMPLIFY","VALUE")
72 | DEMclsDis = resultfolder + "\\DEMclsDis.shp"
73 | arcpy.Dissolve_management(DEMclsShp,DEMclsDis,"GRIDCODE","","MULTI_PART","")
74 | #break
75 |
76 | OriginShp = DEMclsDis
77 | ZoneField = "GRIDCODE"
78 | IgnoreNodata = "DATA"
79 | SummarizeVal = ["MEAN","MAJORITY","MAXIMUM","MEDIAN","MINIMUM","RANGE","STD","SUM","VARIETY"]
80 | JoinType = ""
81 | RasterFolder = paramfolder
82 | outFolder = resultfolder
83 | ## End the default setting ##
84 |
85 | ## If you want to use this tool in Arctoolbox, the code above should be replaced by below ##
86 | OriginShp = arcpy.GetParameterAsText(0)
87 | ZoneField = arcpy.GetParameterAsText(1)
88 | IgnoreNodata = arcpy.GetParameterAsText(2)
89 | SummarizeVal = arcpy.GetParameterAsText(3)
90 | JoinType = arcpy.GetParameterAsText(4)
91 | RasterFolder = arcpy.GetParameterAsText(5)
92 | outFolder = arcpy.GetParameterAsText(6)
93 | outFileName = arcpy.GetParameterAsText(7)
94 | ## End of code for Arctoolbox's input information ##
95 | arcpy.gp.overwriteOutput = 1
96 | for SummVal in SummarizeVal:
97 | print "Calculating the Index of %s..." % SummVal
98 | MultiVal2Poly(OriginShp,ZoneField,IgnoreNodata,SummVal,JoinType,RasterFolder,outFolder,SummVal)
99 | print "All mission done sucessfully!"
100 |
101 | ################### Functions ######################
102 |
103 | def ListFields(FileLayer):
104 | fields = arcpy.gp.listFields(FileLayer)
105 | fieldList = []
106 | for field in fields:
107 | fieldList.append([str(field.name),str(field.type)])
108 | return fieldList
109 | def AddCalDelField(ShpFile,AddName,CalName,FieldDataType):
110 | arcpy.AddField_management(ShpFile,AddName,FieldDataType)
111 | arcpy.CalculateField_management(ShpFile,AddName,"!"+CalName+"!","PYTHON")
112 | arcpy.DeleteField_management(ShpFile,CalName)
113 | def SaveShpAsCSV(ShpFile,OutDir,OutputName):
114 | fields = arcpy.gp.listFields(ShpFile)
115 | fieldList2 = []
116 | for field in fields:
117 | if field.name != "Shape":
118 | fieldList2.append(str(field.name))
119 | #print fieldList2
120 | try:
121 | if not os.path.exists(OutDir+"\\"+OutputName+".csv"):
122 | arcpy.ExportXYv_stats(ShpFile,fieldList2,"COMMA",OutDir+"\\"+OutputName+".csv","ADD_FIELD_NAMES")
123 | else:
124 | os.remove(OutDir+"\\"+OutputName+".csv")
125 | arcpy.ExportXYv_stats(ShpFile,fieldList2,"COMMA",OutDir+"\\"+OutputName+".csv","ADD_FIELD_NAMES")
126 | except:
127 | errorStr = arcpy.gp.GetMessages()
128 | def MultiVal2Poly(OriginShp,ZoneField,IgnoreNodata,SummarizeVal,JoinType,RasterFolder,outFolder,outFileName):
129 | if outFolder == "":
130 | outFolder == RasterFolder
131 | if os.path.exists(RasterFolder):
132 | arcpy.env.workspace = RasterFolder
133 | else:
134 | print "Please make a 'params' folder which contains the parameter files."
135 | raw_input()
136 | exit()
137 | arcpy.Copy_management(OriginShp,"p.shp")
138 | DropFields = ["ZonSAT_OID","ZonSAT_STU","ZonSAT_ZON","ZonSAT_GRI","ZonSAT_COU","ZonSAT_ARE"]
139 | if arcpy.ListRasters("*","ALL") == []:
140 | print "Please check the DEM folder to make sure the existence of DEM raster file."
141 | raw_input()
142 | exit()
143 | for rasterFile in arcpy.ListRasters("*","ALL"):
144 | print " Handing the %s parameter raster " % rasterFile
145 | zoneShp = "p.shp"
146 | curFileName = os.path.splitext(rasterFile)[0]
147 | try:
148 | arcpy.CheckOutExtension("Spatial")
149 | ZonSAT = ZonalStatisticsAsTable(zoneShp,ZoneField,rasterFile,"ZonSAT.dbf",IgnoreNodata,SummarizeVal)
150 | arcpy.MakeFeatureLayer_management(zoneShp,"tempLayer")
151 | arcpy.AddJoin_management("tempLayer",ZoneField,"ZonSAT.dbf",ZoneField,JoinType)
152 | arcpy.CopyFeatures_management("tempLayer",curFileName)
153 | curFileNameShp = curFileName+".shp"
154 | arcpy.Delete_management("ZonSAT.dbf")
155 | except:
156 | arcpy.gp.GetMessages()
157 | try:
158 | AddCalDelField(curFileNameShp,curFileName,"ZonSAT_"+SummarizeVal[0:3],"DOUBLE")
159 | arcpy.DeleteField_management(curFileNameShp,DropFields)
160 | for field in ListFields(curFileNameShp):
161 | #print field
162 | if not(field[0]=="FID" or field[0]=="Shape" or field[0]==curFileName):
163 | if field[0][2:] == "GRIDCODE":
164 | AddCalDelField(curFileNameShp,field[0][2:],field[0],"INTEGER")
165 | else:
166 | AddCalDelField(curFileNameShp,field[0][2:],field[0],field[1])
167 | #print "ZonSAT_"+SummarizeVal[0:3]
168 | arcpy.Delete_management(zoneShp)
169 | arcpy.Copy_management(curFileNameShp,"p.shp")
170 | arcpy.Delete_management(curFileNameShp)
171 | except:
172 | arcpy.gp.GetMessages()
173 | arcpy.Copy_management("p.shp",outFolder+"\\"+outFileName+".shp")
174 | arcpy.Delete_management("p.shp")
175 | print " Saving the Attribute table to CSV file..."
176 | SaveShpAsCSV(outFolder+"\\"+outFileName+".shp",outFolder,outFileName)
177 |
178 | if __name__ == '__main__':
179 | setupMultiVal2Poly()
180 | raw_input()
181 | exit()
--------------------------------------------------------------------------------
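Note: the zone layer in setupMultiVal2Poly comes from reclassifying the DEM into 100 m elevation bands. The remap table it builds pairs consecutive breaks with a zone index, which can be verified standalone:

cls = list(range(0, 8100, 100))  # breaks: 0, 100, ..., 8000
classifyIdx = [[cls[i], cls[i + 1], i] for i in range(len(cls) - 1)]
print(classifyIdx[:3])   # -> [[0, 100, 0], [100, 200, 1], [200, 300, 2]]
print(classifyIdx[-1])   # -> [7900, 8000, 79]
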
/ExtractMultiValue2Zones/ExtractMultiValue2Zones.tbx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/ExtractMultiValue2Zones/ExtractMultiValue2Zones.tbx
--------------------------------------------------------------------------------
/ExtractRasterByMultiPolygon/.idea/ExtractRasterByMultiPolygon.iml:
--------------------------------------------------------------------------------
(XML content stripped by the export; nothing meaningful preserved)
--------------------------------------------------------------------------------
/ExtractRasterByMultiPolygon/.idea/encodings.xml:
--------------------------------------------------------------------------------
(XML content stripped by the export; nothing meaningful preserved)
--------------------------------------------------------------------------------
/ExtractRasterByMultiPolygon/.idea/misc.xml:
--------------------------------------------------------------------------------
(XML content stripped by the export; nothing meaningful preserved)
--------------------------------------------------------------------------------
/ExtractRasterByMultiPolygon/.idea/modules.xml:
--------------------------------------------------------------------------------
(XML content stripped by the export; nothing meaningful preserved)
--------------------------------------------------------------------------------
/ExtractRasterByMultiPolygon/.idea/workspace.xml:
--------------------------------------------------------------------------------
(XML content stripped by the export; nothing meaningful preserved)
--------------------------------------------------------------------------------
/ExtractRasterByMultiPolygon/ExtractRasterByMultiPolygon.py:
--------------------------------------------------------------------------------
1 | 
2 | # coding=utf-8
3 | # Function : Extract raster by mask of a multi-polygon shapefile.
4 | # Created By: Liangjun Zhu
5 | # Date : 1/23/16
6 | # Email : zlj@lreis.ac.cn
7 | #
8 | import os
9 | import arcpy
10 | from arcpy import env
11 | 
12 | def ListFieldValues(fileLayer, fName):
13 |     fields = arcpy.gp.listFields(fileLayer)
14 |     fieldValues = []
15 |     flag = False
16 |     for field in fields:
17 |         if str(field.name) == fName:
18 |             flag = True
19 |     if flag:
20 |         rowCursor = arcpy.SearchCursor(fileLayer)
21 |         for row in rowCursor:
22 |             fieldValues.append(row.getValue(fName))
23 |     return (fieldValues, flag)
24 | 
25 | def ExtractRasterByMultiPolygon(shpFile, fieldName, originRasterFile, bufferSize, suffix, outPath):
26 |     ## Set environment settings
27 |     if not os.path.isdir(outPath):  ## if outPath does not exist, create it
28 |         if outPath != "":
29 |             os.mkdir(outPath)
30 |     env.workspace = outPath
31 |     ## Split the polygons by fieldName
32 |     polyNames, flag = ListFieldValues(shpFile, fieldName)
33 |     if flag:
34 |         arcpy.gp.overwriteOutput = 1
35 |         ## Get the cellsize of originRasterFile
36 |         cellSizeResult = arcpy.GetRasterProperties_management(originRasterFile, "CELLSIZEX")
37 |         cellSize = cellSizeResult.getOutput(0)
38 |         bufferDistance = float(cellSize) * bufferSize
39 |         arcpy.Split_analysis(shpFile, shpFile, fieldName, outPath)
40 |         polyFiles = []
41 |         polyBufferFiles = []
42 |         polyFinalFiles = []
43 |         rasterFiles = []
44 |         for name in polyNames:
45 |             polyFile = outPath + os.sep + name + '.shp'
46 |             polyBufferFile = outPath + os.sep + name + '_buf.shp'
47 |             polyFinalFile = outPath + os.sep + name + '_final.shp'
48 |             if suffix is None:
49 |                 rasterFile = outPath + os.sep + name + '.tif'
50 |             else:
51 |                 rasterFile = outPath + os.sep + name + suffix + '.tif'
52 |             polyFiles.append(polyFile)
53 |             polyBufferFiles.append(polyBufferFile)
54 |             rasterFiles.append(rasterFile)
55 |             polyFinalFiles.append(polyFinalFile)
56 |             arcpy.Buffer_analysis(polyFile, polyBufferFile, bufferDistance, "OUTSIDE_ONLY")
57 |             arcpy.Merge_management([polyFile, polyBufferFile], polyFinalFile)
58 | 
59 |         if arcpy.CheckOutExtension("Spatial") == "CheckedOut":
60 |             for i in range(0, len(polyBufferFiles)):
61 |                 tempRaster = arcpy.sa.ExtractByMask(originRasterFile, polyFinalFiles[i])
62 |                 tempRaster.save(rasterFiles[i])
63 |     else:
64 |         print "Field %s does not exist in %s" % (fieldName, shpFile)
65 |         return None
66 | 
67 | if __name__ == '__main__':
68 |     ## input
69 |     MultiPolyShp = r'D:\data\GLake\basins.shp'
70 |     FieldName = "Code"  ## Field used to name the output raster files
71 |     RasterFile = r'D:\data\GLake\glake_id.tif'
72 |     #RasterFile = r'D:\data\GLake\srtm_tp.tif'
73 |     BufferSize = 20  ## Each polygon is buffered by BufferSize * cellsize of the raster
74 |     suffix = "_ID"  ## If no suffix is wanted, set it to None
75 |     ## output
76 |     outDir = r'D:\data\GLake\GLoutput'
77 |     ## run
78 |     ExtractRasterByMultiPolygon(MultiPolyShp, FieldName, RasterFile, BufferSize, suffix, outDir)
79 | 
--------------------------------------------------------------------------------
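Note: on ArcGIS 10.1 or later, the ListFieldValues helper above can be written more directly with arcpy.da.SearchCursor. A sketch under that assumption (not part of the repository):

import arcpy

def list_field_values(layer, field_name):
    """Return (values, found) for one field, like ListFieldValues above."""
    if field_name not in [f.name for f in arcpy.ListFields(layer)]:
        return [], False
    with arcpy.da.SearchCursor(layer, [field_name]) as cursor:
        return [row[0] for row in cursor], True
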
/HydroDataDownload/Down_Fujian_RealTimeData_Shuizhi.py:
--------------------------------------------------------------------------------
1 | 
2 | # coding=utf-8
3 | #
4 | # Real-time surface water quality information release system of Fujian Province (trial operation)
5 | # https://szfb.fjeec.cn:444/AutoData/Business/DataPublish_FJ/index.html
6 | #
7 | #
8 | #
9 | # Created by Liangjun Zhu (zlj@lreis.ac.cn)
10 | # Updated: 08/17/2020
11 | #          06/30/2021 Add verify=False to request.get() function
12 | from __future__ import unicode_literals
13 | 
14 | import os
15 | import json
16 | import datetime
17 | from io import open
18 | import requests
19 | from requests.exceptions import RequestException
20 | 
21 | from apscheduler.schedulers.blocking import BlockingScheduler
22 | from pygeoc.utils import UtilClass
23 | 
24 | REAL_URL = 'https://szfb.fjeec.cn:444/API/PublicService/ShuiZhiFaBu/GetRealData?AreaID=&RiverID='
25 | REQ_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
26 |                              '(KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
27 |                'Accept': 'application/json,text/plain,*/*',
28 |                'Content-Type': 'application/json;charset=utf8',
29 |                'Authorization': 'Public_Web=6A607FAB00686B7B363BD9A81B835649'}
30 | 
31 | 
32 | def get_realtime_data():
33 |     try:
34 |         response = requests.get(REAL_URL, headers=REQ_HEADERS, verify=False)
35 |         if response.status_code == 200:
36 |             tmpstr = response.text
37 |             tmpstr = tmpstr.replace('\r\n', '')
38 |             tmpstr = tmpstr.replace('\n', '')
39 |             tmpstr = tmpstr.replace('\r', '')
40 |             return tmpstr
41 |         return None
42 |     except RequestException as excpt:
43 |         print(excpt)
44 |         print('Get data failed from %s' % REAL_URL)
45 | 
46 | 
47 | def down_routinely(savedir):
48 |     """Write the response string to a log file and the parsed JSON to a YYYY-MM-DD-HH.json file."""
49 |     ctime = datetime.datetime.now()
50 |     ctime_str = ctime.strftime('%Y-%m-%d %H:%M:%S')
51 |     print('Executed at %s' % ctime_str)
52 | 
53 |     dstring = get_realtime_data()
54 |     with open(savedir + os.sep + 'FJ_realdata_shuizhi.data', 'a', encoding='utf-8') as logf:
55 |         if dstring is None:
56 |             logf.write('[%s] Get data failed!\n' % ctime_str)
57 |         else:
58 |             logf.write('[%s] %s\n' % (ctime_str, dstring))
59 |     if dstring is None:
60 |         return
61 | 
62 |     djson = json.loads(dstring)
63 |     if 'ResultList' not in djson:
64 |         return
65 |     if len(djson['ResultList']) < 1:
66 |         return
67 |     if 'DataTime' not in djson['ResultList'][0]:
68 |         return
69 |     data_time_str = djson['ResultList'][0]['DataTime']
70 |     data_time = datetime.datetime.strptime(data_time_str, '%Y/%m/%d %H:%M')
71 | 
72 |     rawdir = savedir + os.sep + 'raw_data'  # was 'wp', which only worked because wp is a global
73 |     UtilClass.mkdir(rawdir)
74 |     json_name = '%s.json' % (data_time.strftime('%Y-%m-%d-%H'))
75 |     json_file = rawdir + os.sep + json_name
76 | 
77 |     if os.path.exists(json_file):
78 |         with open(savedir + os.sep + 'FJ_realdata_shuizhi.log', 'a', encoding='utf-8') as logf:
79 |             logf.write('[%s] %s already exists.\n' % (ctime_str, json_name))
80 |     else:
81 |         with open(json_file, 'w', encoding='utf-8') as jf:
82 |             jf.write(json.dumps(djson, indent=4, ensure_ascii=False))
83 |         with open(savedir + os.sep + 'FJ_realdata_shuizhi.log', 'a', encoding='utf-8') as logf:
84 |             logf.write('[%s] %s saved successfully.\n' % (ctime_str, json_name))
85 | 
86 | 
87 | if __name__ == "__main__":
88 |     wp = UtilClass.current_path(lambda: 0)
89 |     # wp = 'D:\\tmp\\fujian_shuizhi_realtime'
90 |     UtilClass.mkdir(wp)
91 | 
92 |     # down_routinely(wp)
93 | 
94 |     sched = BlockingScheduler()
95 |     sched.add_job(down_routinely, args=[wp], trigger='interval', seconds=10800)
96 |     sched.start()
97 | 
--------------------------------------------------------------------------------
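Note: down_routinely names each saved JSON file after the server-reported 'DataTime' rather than the local clock, so repeated downloads of the same observation hour collapse onto one file. The derivation, runnable standalone:

import datetime

# 'DataTime' as returned by the API, e.g. '2021/06/30 14:00'
data_time = datetime.datetime.strptime('2021/06/30 14:00', '%Y/%m/%d %H:%M')
print('%s.json' % data_time.strftime('%Y-%m-%d-%H'))  # -> 2021-06-30-14.json
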
/HydroDataDownload/GetYcRainSum_20150805-zhulm.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | 
3 | import urllib2, httplib, string, sys, time
4 | from xml.etree import ElementTree as xmlTree
5 | 
6 | # -------- Fetch the page and save it as XML --------
7 | def GetYcRainSum(sTime, eTime, stcd, tDivide):
8 |     # SOAP 1.1 envelope. The XML tags were stripped from this export; the
9 |     # element names below are inferred from the parameter names and the SOAPAction.
10 |     params = '''<?xml version="1.0" encoding="utf-8"?>
11 | <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
12 |   <soap:Body>
13 |     <GetYcRainSum xmlns="http://tempuri.org/">
14 |       <sTime>%s</sTime>
15 |       <eTime>%s</eTime>
16 |       <stcd>%s</stcd>
17 |       <tDivide>%s</tDivide>
18 |     </GetYcRainSum>
19 |   </soap:Body>
20 | </soap:Envelope>'''
21 |
22 | def getXML(sTime, eTime, stcd, tDivide):
23 | try:
24 | #-------------这个页面打不开----------
25 | conn = httplib.HTTP("yc.wswj.net")
26 | #-------------这个页面打不开----------
27 | # request是自动发送header,putrequest要手动发送header(两者之间的区别)
28 | conn.putrequest("POST","/ahyc/web_rain/Service.asmx")
29 |
30 | conn.putheader("Accept","*/*")
31 | conn.putheader("Accept-Encoding","gzip,deflate,sdch")
32 | conn.putheader("Accept-Language","zh-CN,zh;q=0.8,en;q=0.6")
33 | conn.putheader("Host","yc.wswj.net")
34 | conn.putheader("Origin","http://yc.wswj.net")
35 | #conn.putheader("Connection","keep-alive");
36 | #-------这个页面可以打开,是降水分布的网址--------这个页面含有降水数据,不知道如何查看页面具体内容--------------
37 | conn.putheader("Referer","http://yc.wswj.net/ahyc/Main2.swf")
38 |
39 | conn.putheader("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36");
40 | conn.putheader("Content-Type","text/xml; charset=utf-8")
41 | conn.putheader("Content-Length", "%d" % len(SoapMessage))
42 | conn.putheader("SOAPAction","http://tempuri.org/GetYcRainSum")
43 | conn.putheader("Cookie","ASP.NET_SessionId=suj53q55f3qxjp55anbw0sjg; CNZZDATA3906820=cnzz_eid%3D1809096373-1405232367-http%253A%252F%252Fhfswj.net%252F%26ntime%3D1405302063")
44 | conn.endheaders()
45 | conn.send(SoapMessage)
46 | statuscode, statusmessage, header = conn.getreply()
47 | print "Response: ",statuscode, statusmessage
48 | if statuscode == 200:
49 | #print "Headers: ", header
50 | Res = conn.getfile().read()
51 | #print str(Res).decode('utf-8')
52 | return Res
53 |
54 | except:
55 | time.sleep(20)
56 | return getXML(sTime, eTime, stcd, tDivide)
57 | return getXML(sTime, eTime, stcd, tDivide)
58 |
59 |
60 | #---------------将获取的XML站点转换为“CSV”格式-----------------
61 | def SaveXML2Csv(Res, findName, savePath, year):
62 | tree = xmlTree.fromstring(Res)
63 | nodes = tree.findall(findName)
64 | if not nodes:
65 | return 0
66 | else:
67 | f=open(savePath,"a")
68 |
69 | for node in nodes:
70 | itemline = ""
71 | month = str(node[0].text.encode('utf-8'))[0:2]
72 | day = str(node[0].text.encode('utf-8'))[5:7]
73 | HH = str(node[0].text.encode('utf-8'))[10:12]
74 | itemline+=str(year)+"/"+month+"/"+day+" "+HH+":00"+","+str(node[1].text.encode('utf-8'))+"\n"
75 | #print itemline
76 |
77 | f.write(itemline)
78 | f.close()
79 | return 1
80 |
81 | #--------------主函数入口------------------------
82 | if __name__ == '__main__':
83 | print "Beigin to download YcRainSum data!"
84 |
85 |
86 | f=open(r"D:\WorkSpace\Download_RainData\Zhanhao.txt","r")
87 |
88 |
89 | ZhanHaos = []
90 | for eachSite in f:
91 | ZhanHaos.append(eachSite.split('\n')[0])
92 | f.close()
93 | #print len(ZhanHaos)
94 | print ZhanHaos
95 |
96 | def downData(start, end, ZhanHao, year):
97 | #------------开始下载页面-----------------
98 | xmlText = GetYcRainSum(start, end, ZhanHao, "60")
99 | savename=ZhanHao+ '-' +year
100 |
101 |
102 | savePath = r'D:\WorkSpace\Download_RainData\2011\%s.txt' % savename
103 |
104 |
105 | #------------调用前面函数,进行格式转换--------------
106 | success = SaveXML2Csv(xmlText, ".//GetRainValue", savePath, year)
107 | #ZhanHaos = ['62903180','62942737','62942707','62915310','62933800','62942747','62922800','62942717','62942757']
108 |
109 | #-----------降水数据从2007年开始才有记录--------------------
110 | #years = ['2007','2008','2009','2010','2011','2012','2013','2014','2015']
111 | years = ['2013']
112 | #years = ['2015']
113 | #months = ['01','02','03','04','05','06','07','08','09','10','11','12']
114 | #downData('2013-12-01 00:00','2013-12-31 00:00', '62903180')
115 |
116 | for ZhanHao in ZhanHaos:
117 | for year in years:
118 | print "Downloading "+str(ZhanHao)+"'s data in "+str(year)+" ..."
119 | sTime = str(year)+'-01-01 00:00'
120 | eTime = str(year)+'-12-31 23:00'
121 | print ZhanHao,sTime,eTime
122 | downData(sTime,eTime, ZhanHao, year)
123 | print "Download "+str(ZhanHao)+"'s data successfully!"
124 | print "Download Succeed!"
125 |
126 |
127 |
128 |
129 |
--------------------------------------------------------------------------------
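Note: the httplib/putrequest plumbing above predates the requests library; a hypothetical modernized sketch of the same SOAP POST (headers and SOAPAction copied from the original calls; the session cookie is omitted and may or may not still be required by the server):

import requests

def get_xml(soap_message):
    headers = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction': 'http://tempuri.org/GetYcRainSum',
        'Referer': 'http://yc.wswj.net/ahyc/Main2.swf',
    }
    resp = requests.post('http://yc.wswj.net/ahyc/web_rain/Service.asmx',
                         data=soap_message, headers=headers, timeout=60)
    resp.raise_for_status()
    return resp.text
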
/HydroDataDownload/ReadDatabase_SURF_CLI_CHN_MUL_DAY.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Func. : Read Database of SURF_CLI_CHN_MUL_DAY_V3.0
3 | # Author: Liangjun Zhu
4 | # Date : 2016-4-11
5 | # Email : zlj@lreis.ac.cn
6 | # Blog : http://zhulj.net/python/2016/04/11/Constructing-SURF_CLI_CHN_MUL_DAY_V3.0-database.html
7 |
8 | import datetime
9 | import os
10 | import sqlite3
11 |
12 |
13 | def get_conn(path):
14 | """
15 | get connection of Sqlite
16 | :param path: path of Sqlite database
17 | """
18 | conn = sqlite3.connect(path)
19 | if os.path.exists(path) and os.path.isfile(path):
20 | # print('database in hardware :[{}]'.format(path))
21 | return conn
22 | else:
23 | conn = None
24 | # print('database in memory :[:memory:]')
25 | return sqlite3.connect(':memory:')
26 |
27 |
28 | def get_cursor(conn):
29 | """
30 | get cursor of current connection
31 | :param conn: connection of Sqlite
32 | """
33 | if conn is not None:
34 | return conn.cursor()
35 | else:
36 | return get_conn('').cursor()
37 |
38 |
39 | def close_all(conn, cu):
40 | """
41 | close connection and cursor of Sqlite
42 | :param conn: connection of Sqlite
43 | :param cu: cursor of conn
44 | """
45 | try:
46 | if cu is not None:
47 | cu.close()
48 | finally:
49 | if conn is not None:
50 | conn.close()
51 |
52 |
53 | def getTablesList(dbpath):
54 | """
55 | Get all tables' name in Sqlite database
56 | :param dbpath:
57 | :return: table names
58 | """
59 | conn = sqlite3.connect(dbpath)
60 | cu = get_cursor(conn)
61 | tabs = cu.execute(
62 | "select name from sqlite_master where type = 'table' order by name").fetchall()
63 | tabList = list()
64 | for tab in tabs:
65 | if len(tab[0]) == 6:
66 | tabList.append(tab[0])
67 | close_all(conn, cu)
68 | return tabList
69 |
70 |
71 | def fetchData(conn, sql):
72 | """
73 | Query data by sql
74 | :param conn:
75 | :param sql:
76 | :return: data queried
77 | """
78 | data = list()
79 | if sql is not None and sql != '':
80 | cu = get_cursor(conn)
81 | cu.execute(sql)
82 | r = cu.fetchall()
83 | if len(r) > 0:
84 | for e in range(len(r)):
85 | # print(r[e])
86 | data.append(r[e])
87 | else:
88 | print('the [{}] is empty or equal None!'.format(sql))
89 | return data
90 |
91 |
92 | def saveToCSV(data, csvPath, flag='climData', fields=None):
93 | f = open(csvPath, "w")
94 | title = ''
95 | if flag == 'climData':
96 | if fields is None:
97 | title = 'stationID,datetimeBJ,avgPRS,maxPRS,minPRS,avgTEM,maxTEM,minTEM,' \
98 | 'avgRHU,minRHU,PRE208,PRE820,PRE,smEVP,lgEVP,avgWIN,maxWIN,maxWINASP,' \
99 | 'extWIN,extWINASP,SSD,avgGST,maxGST,minGST\n'
100 | else:
101 | title = ','.join(fields)
102 | title += '\n'
103 | elif flag == 'stationInfo':
104 | title = 'stationID,lat,lon,alti\n'
105 | f.write(title)
106 | for items in data:
107 | itemsStr = ''
108 | if flag == 'stationInfo':
109 | items = items[0]
110 | for item in items:
111 | itemsStr += str(item)
112 | itemsStr += ','
113 | itemsStr = itemsStr[:-1]
114 | itemsStr += '\n'
115 | f.write(itemsStr)
116 | f.close()
117 |
118 |
119 | def isNum(value):
120 | try:
121 | x = int(value)
122 | except TypeError:
123 | return False
124 | except ValueError:
125 | return False
126 | except Exception:
127 | return False
128 | else:
129 | return True
130 |
131 |
132 | def QueryDatabase(dbpath, savePath, stationIDs, startTime, endTime, fields=None):
133 | """
134 | Query and save data from Sqlite database
135 | :param dbpath:
136 | :param savePath:
137 | :param stationIDs:
138 | :param startTime:
139 | :param endTime:
140 | :param fields: List of selected fields; None means all fields.
141 | :return:
142 | """
143 | tableList = getTablesList(dbpath)
144 | conn = sqlite3.connect(dbpath)
145 | if not os.path.isdir(savePath):
146 | os.mkdir(savePath)
147 | stationInfoCSVPath = savePath + os.sep + 'stationInfo.csv'
148 | stationInfoData = list()
149 | if stationIDs == list():
150 | stationIDs = getTablesList(dbpath)
151 | else:
152 | for i in range(len(stationIDs)):
153 | if isNum(stationIDs[i]):
154 | stationIDs[i] = 'S' + str(stationIDs[i])
155 | else:
156 | stationIDs[i] = 'S' + stationIDs[i]
157 | if fields is None:
158 | fields = ['stID', 'date', 'avgPRS', 'maxPRS', 'minPRS', 'avgTEM', 'maxTEM', 'minTEM',
159 | 'avgRHU', 'minRHU', 'PRE208', 'PRE820', 'PRE', 'smEVP', 'lgEVP', 'avgWIN',
160 | 'maxWIN', 'maxWINASP', 'extWIN', 'extWINASP', 'SSD', 'avgGST', 'maxGST', 'minGST']
161 | else:
162 | fields.insert(0, 'date')
163 | fields.insert(0, 'stID')
164 | selects = ','.join(fields)
165 | for tabName in stationIDs:
166 | # tabName = 'S' + stationID
167 | stationID = tabName[1:]
168 | if tabName in tableList:
169 | csvPath = savePath + os.sep + tabName + '.csv'
170 | startT = datetime.datetime(startTime[0], startTime[1], startTime[2])
171 | endT = datetime.datetime(endTime[0], endTime[1], endTime[2])
172 | endT += datetime.timedelta(days=1)
173 | startTStr = startT.strftime("%Y-%m-%d %H:%M:%S")[:10]
174 | endTStr = endT.strftime("%Y-%m-%d %H:%M:%S")[:10]
175 | fetch_data_sql = '''SELECT %s FROM %s WHERE date BETWEEN "%s" AND
176 | "%s" ORDER BY date''' % (selects, tabName, startTStr, endTStr)
177 | # print(fetch_data_sql)
178 | data = fetchData(conn, fetch_data_sql)
179 | saveToCSV(data, csvPath, fields=fields)
180 | fetch_station_sql = '''SELECT * FROM stationInfo WHERE stID=%s ''' % stationID
181 | stationInfoData.append(fetchData(conn, fetch_station_sql))
182 | saveToCSV(stationInfoData, stationInfoCSVPath, 'stationInfo')
183 | conn.close()
184 |
185 |
186 | if __name__ == '__main__':
187 | # Input parameters
188 | SQLITE_DB_PATH = r'D:\data\common_GIS_Data\SURF_CLI_CHN_MUL_DAY_V3.0\SURF_CLI_CHN_MUL_DAY_V3-201712.db'
189 | QUERY_STATION_IDs = [58911]
190 | QUERY_DATE_FROM = [1950, 1, 1] # format: Year, Month, Day
191 | QUERY_DATE_END = [2017, 12, 31]
192 | # Available fields:
193 | # avgPRS,maxPRS,minPRS,avgTEM,maxTEM,minTEM, avgRHU,minRHU,
194 | # PRE208,PRE820,PRE,smEVP,lgEVP,avgWIN,maxWIN,maxWINASP,
195 | # extWIN,extWINASP,SSD,avgGST,maxGST,minGST
196 | SELECTED_FIELDS = None
197 |
198 | SAVE_PATH = r'D:\tmp'
199 |
200 | QueryDatabase(SQLITE_DB_PATH, SAVE_PATH, QUERY_STATION_IDs, QUERY_DATE_FROM, QUERY_DATE_END,
201 | SELECTED_FIELDS)
202 |
--------------------------------------------------------------------------------
/HydroDataDownload/anhui_precipitation_download.py:
--------------------------------------------------------------------------------
1 |
2 | # coding=utf-8
3 | #
4 | # Author: Liang-Jun Zhu
5 | # Email: zlj@lreis.ac.cn
6 | #
7 |
8 | import httplib, time
9 | from xml.etree import ElementTree as xmlTree
10 |
11 |
12 | def GetYcRainSum(sTime, eTime, stcd, tDivide):
13 | params = \
14 | '''<?xml version="1.0" encoding="utf-8"?>
15 | <!-- NOTE: the XML tags of this SOAP body were stripped when this file was archived;
16 | the element names below are assumptions reconstructed from the SOAPAction
17 | "getYlZzt" and the argument names (stcd, sTime, eTime, tDivide). -->
18 | <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
19 | <soap:Body><getYlZzt xmlns="http://tempuri.org/">
20 | <stcd>%s</stcd>
21 | <sTime>%s</sTime>
22 | <eTime>%s</eTime>
23 | <tDivide>%s</tDivide>
24 | </getYlZzt></soap:Body></soap:Envelope>'''
25 | SoapMessage = params % (stcd, sTime, eTime, tDivide)
26 |
27 | def getXML(sTime, eTime, stcd, tDivide):
28 | try:
29 | conn = httplib.HTTP("yc.wswj.net")
30 | conn.putrequest("POST", "/ahyc/web_rain/Service.asmx")
31 | conn.putheader("Accept", "*/*")
32 | conn.putheader("Accept-Encoding", "gzip,deflate")
33 | conn.putheader("Accept-Language", "zh-CN,zh;q=0.8,en;q=0.6")
34 | conn.putheader("Host", "yc.wswj.net")
35 | conn.putheader("Origin", "http://yc.wswj.net")
36 | # conn.putheader("Connection","keep-alive")
37 | conn.putheader("Referer", "http://yc.wswj.net/ahyc/Main73.swf")
38 | conn.putheader("User-Agent",
39 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36")
40 | conn.putheader("Content-Type", "text/xml; charset=utf-8")
41 | conn.putheader("Content-Length", "%d" % len(SoapMessage))
42 | conn.putheader("SOAPAction", "http://tempuri.org/getYlZzt")
43 | conn.putheader("Cookie",
44 | "td_cookie=18446744071625666010; CNZZDATA3906820=cnzz_eid%3D1560662269-1465952761-http%253A%252F%252Fyc.wswj.net%252F%26ntime%3D1465952761;ASP.NET_SessionId=nurmvus304xbfjofntol04jf")
45 | conn.endheaders()
46 | conn.send(SoapMessage)
47 | # conn.set_debuglevel(1)
48 | statuscode, statusmessage, header = conn.getreply()
49 | #print "Response: ", statuscode, statusmessage
50 | if statuscode == 200:
51 | #print "Headers: ", header
52 | Res = conn.getfile().read()
53 | #print str(Res).decode('utf-8')
54 | return Res
55 | ## else:
56 | ## time.sleep(20)
57 | ## return getXML(sTime, eTime, stcd, tDivide)
58 | except:
59 | time.sleep(20)
60 | return getXML(sTime, eTime, stcd, tDivide)
61 |
62 | return getXML(sTime, eTime, stcd, tDivide)
63 |
64 |
65 | def SaveXML2Csv(Res, findName, savePath, year):
66 | tree = xmlTree.fromstring(Res)
67 | nodes = tree.findall(findName)
68 | if not nodes:
69 | return 0
70 | else:
71 | f = open(savePath, "a")
72 | ## titleline = ""
73 | ## for Title in nodes[0]:
74 | ## titleline += str(Title.tag.encode('utf-8'))+","
75 | ## titleline+="\n"
76 | ## f.write(titleline)
77 | for node in nodes:
78 | itemline = ""
79 | ### previous code, deprecated
80 | # month = str(node[0].text.encode('utf-8'))[0:2]
81 | # day = str(node[0].text.encode('utf-8'))[5:7]
82 | # HH = str(node[0].text.encode('utf-8'))[10:12]
83 | ### updated by LJ, 2016-6-15
84 | timeString = node[0].text
85 | valueString = node[1].text
86 | yyyy = timeString[0:4]
87 | mm = timeString[5:7]
88 | dd = timeString[8:10]
89 | HHMM = timeString[11:16]
90 | itemline += yyyy + "/" + mm + "/" + dd + " " + HHMM + "," + valueString + "\n"
91 | # print itemline
92 | f.write(itemline)
93 | f.close()
94 | return 1
95 |
96 |
97 | if __name__ == '__main__':
98 | print ("Beigin to download Anhui Rainfall data!")
99 |
100 | ## newSiteLines = []
101 | ## for i in range(len(ZhanHao)):
102 | ## xmlText = GetYcRainSum(ZhanHao[i], "2014-07-01 08:00", "2014-07-03 08:00", "1440")
103 | ## #print str(xmlText).decode('utf-8')
104 | ## savePath = r'E:\RainfallData_Anhui\YlZzt\%s.csv' % ZhanMing[i]
105 | ## #print savePath
106 | ## if SaveXML2Csv(xmlText, ".//YLZZT", savePath):
107 | ## newSiteLine = ZhanHao[i]+","+ZhanMing[i]
108 | ## newSiteLines.append(newSiteLine)
109 | ## print i,newSiteLine
110 | ## print len(newSiteLines)
111 | ## f=open(r"e:\NewRainfallSites.txt","w")
112 | ## for line in newSiteLines:
113 | ## f.write(line)
114 | ## f.close()
115 |
116 | f=open(r"C:\z_data\zhongTianShe\climate\pcp_download_ahyc\ZhanHao_zhongtianshe.txt","r")
117 | ZhanHaos = []
118 | for eachSite in f:
119 | ZhanHaos.append(eachSite.split('\n')[0])
120 | f.close()
121 | print len(ZhanHaos)
122 | print ZhanHaos
123 | # ZhanHaos = ['62903180', '62942837', '62902700']
124 |
125 |
126 | def downData(start, end, ZhanHao, year):
127 | xmlText = GetYcRainSum(start, end, ZhanHao, "1440")
128 | savePath = r'C:\z_data\zhongTianShe\climate\pcp_download_ahyc\daily\%s-%s.txt' % (ZhanHao, str(year))
129 | success = SaveXML2Csv(xmlText, ".//data", savePath, year)
130 | # print success
131 |
132 |
133 | # ZhanHaos = ['62903180','62942737','62942707','62915310','62933800','62942747','62922800','62942717','62942757']
134 | # years = ['2005','2006','2007','2008','2009','2010','2011','2012','2013','2014']
135 | years = ['2011','2012', '2013']
136 | # months = ['01','02','03','04','05','06','07','08','09','10','11','12']
137 | # downData('2013-12-01 00:00','2013-12-31 00:00', '62903180')
138 |
139 | for ZhanHao in ZhanHaos:
140 | for year in years:
141 | print "Downloading " + str(ZhanHao) + "'s data in " + str(year) + " ..."
142 | sTime = str(year) + '-01-01 00:00'
143 | eTime = str(year) + '-12-31 23:00'
144 | print ZhanHao, sTime, eTime
145 | downData(sTime, eTime, ZhanHao, year)
146 | print "Download " + str(ZhanHao) + "'s data successfully!"
147 |
148 | print "Download Succeed!"
149 |
--------------------------------------------------------------------------------
/HydroDataDownload/climate_download.py:
--------------------------------------------------------------------------------
1 |
2 | # coding=utf-8
3 | # Author: Liangjun Zhu
4 | # Date : 2016-4-7
5 | # Email : zlj@lreis.ac.cn
6 | # Blog : zhulj.net
7 |
8 | import urllib2
9 | import os
10 | import sys
11 | import time
12 |
13 |
14 | def currentPath():
15 | path = sys.path[0]
16 | if os.path.isdir(path):
17 | return path
18 | elif os.path.isfile(path):
19 | return os.path.dirname(path)
20 |
21 |
22 | def mkdir(dir):
23 | if not os.path.isdir(dir):
24 | os.mkdir(dir)
25 |
26 |
27 | def downloadByUrl(curUrl, filePath):
28 | f = urllib2.urlopen(curUrl)
29 | data = f.read()
30 | with open(filePath, "wb") as code:
31 | code.write(data)
32 |
33 |
34 | def findUrlTxts(path):
35 | tempFiles = os.listdir(path)
36 | urlTxts = []
37 | for s in tempFiles:
38 | if s.split(".")[-1] == 'txt':
39 | urlTxts.append(path + os.sep + s)
40 | return urlTxts
41 |
42 |
43 | def ReadUrls(files):
44 | urls = []
45 | for file in files:
46 | curF = open(file)
47 | for line in curF:
48 | urls.append(line)
49 | curF.close()
50 | return urls
51 |
52 |
53 | def findStations(urls):
54 | stations = []
55 | for curUrl in urls:
56 | temp = curUrl.split("?")[0]
57 | fileName = temp.split("/")[-1]
58 | sss = fileName.split('-')
59 | for ss in sss:
60 | if len(ss) == 5 and not ss in stations:
61 | stations.append(ss)
62 | return stations
63 |
64 |
65 | def isStationNeeded(name):
66 | temp = name.split('-')
67 | flag = False
68 | for s in temp:
69 | if len(s) == 5:
70 | flag = True
71 | break
72 | return flag
73 |
74 |
75 | def climateDown(urls, savePath, eachNum = 200, timeout = 5):
76 | count = 1
77 | allcount = len(urls)
78 | for curUrl in urls:
79 | temp = curUrl.split("?")[0]
80 | saveName = temp.split("/")[-1]
81 | if isStationNeeded(saveName):
82 | curSavePath = savePath + os.sep + saveName
83 | if count % eachNum == 0:
84 | time.sleep(timeout)
85 | downloadByUrl(curUrl, curSavePath)
86 | print " %d / %d, %s" % (count, allcount, saveName)
87 | count += 1
88 |
89 |
90 | if __name__ == '__main__':
91 | CUR_PATH = currentPath()
92 | CUR_PATH = r'C:\Users\ZhuLJ\Desktop\climate_data_download'
93 | DOWN_PATH = CUR_PATH + os.sep + 'download'
94 | mkdir(DOWN_PATH)
95 | urlTxts = findUrlTxts(CUR_PATH)
96 | urls = ReadUrls(urlTxts)
97 | climateDown(urls, DOWN_PATH, 200, 5)
98 |
--------------------------------------------------------------------------------
/HydroDataDownload/netcdf4_pydap_test.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import netcdf4_pydap
4 |
5 | credentials={'username': 'zhuliangjun',
6 | 'password': 'Liangjun0130',
7 | 'authentication_url': 'https://urs.earthdata.nasa.gov/'}
8 | url = ('http://disc2.gesdisc.eosdis.nasa.gov/data//TRMM_L3/'
9 | 'TRMM_3B42_Daily.7/2016/10/3B42_Daily.20161019.7.nc4')
10 |
11 | with netcdf4_pydap.Dataset(url, **credentials) as dataset:
12 | data = dataset.variables['SLP'][0,:,:]  # 'SLP' follows the netcdf4_pydap docs example; the TRMM 3B42 file likely uses a different variable name (an assumption; check dataset.variables)
13 | plt.contourf(np.squeeze(data))
14 | plt.show()
--------------------------------------------------------------------------------
/HydroDataDownload/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | from cookielib import CookieJar
3 | from urllib import urlencode
4 |
5 | import urllib2
6 |
7 | # The user credentials that will be used to authenticate access to the data
8 |
9 | username = "zhuliangjun"
10 | password = "Liangjun0130"
11 |
12 | # The url of the file we wish to retrieve
13 |
14 | url = "http://disc2.gesdisc.eosdis.nasa.gov/data//TRMM_L3/TRMM_3B42_Daily.7/" \
15 | "2016/10/3B42_Daily.20161019.7.nc4"
16 |
17 | # Create a password manager to deal with the 401 response that is returned from
18 | # Earthdata Login
19 |
20 | password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
21 | password_manager.add_password(None, "https://urs.earthdata.nasa.gov", username, password)
22 |
23 | # Create a cookie jar for storing cookies. This is used to store and return
24 | # the session cookie given to us by the data server (otherwise it will just
25 | # keep sending us back to Earthdata Login to authenticate). Ideally, we
26 | # should use a file based cookie jar to preserve cookies between runs. This
27 | # will make it much more efficient.
28 |
29 | cookie_jar = CookieJar()
30 |
31 | # Install all the handlers.
32 |
33 | opener = urllib2.build_opener(
34 | urllib2.HTTPBasicAuthHandler(password_manager),
35 | # urllib2.HTTPHandler(debuglevel=1), # Uncomment these two lines to see
36 | # urllib2.HTTPSHandler(debuglevel=1), # details of the requests/responses
37 | urllib2.HTTPCookieProcessor(cookie_jar))
38 | urllib2.install_opener(opener)
39 |
40 | # Create and submit the request. There are a wide range of exceptions that
41 | # can be thrown here, including HTTPError and URLError. These should be
42 | # caught and handled.
43 |
44 | request = urllib2.Request(url)
45 | response = urllib2.urlopen(request)
46 |
47 | # Print out the result (not a good idea with binary data!)
48 |
49 | body = response.read()
50 | print body
51 |
--------------------------------------------------------------------------------
/HydroDataDownload/trmm_download.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Author: Liangjun Zhu
3 | # Date : 2016-4-7
4 | # Email : zlj@lreis.ac.cn
5 | # Blog : zhulj.net
6 |
7 | import urllib2
8 | import os
9 | import sys
10 | import time
11 |
12 |
13 | def currentPath():
14 | path = sys.path[0]
15 | if os.path.isdir(path):
16 | return path
17 | elif os.path.isfile(path):
18 | return os.path.dirname(path)
19 |
20 |
21 | def mkdir(dir):
22 | if not os.path.isdir(dir):
23 | os.mkdir(dir)
24 |
25 |
26 | def downloadByUrl(curUrl, filePath):
27 | f = urllib2.urlopen(curUrl)
28 | data = f.read()
29 | with open(filePath, "wb") as code:
30 | code.write(data)
31 |
32 |
33 | def findUrlTxts(path):
34 | tempFiles = os.listdir(path)
35 | urlTxts = []
36 | for s in tempFiles:
37 | if s.split(".")[-1] == 'txt':
38 | urlTxts.append(path + os.sep + s)
39 | return urlTxts
40 |
41 |
42 | def ReadUrls(files):
43 | urls = []
44 | for file in files:
45 | curF = open(file)
46 | for line in curF:
47 | line = line.split('\n')[0]
48 | urls.append(line)
49 | curF.close()
50 | return urls
51 |
52 |
53 | def findStations(urls):
54 | stations = []
55 | for curUrl in urls:
56 | temp = curUrl.split("?")[0]
57 | fileName = temp.split("/")[-1]
58 | sss = fileName.split('-')
59 | for ss in sss:
60 | if len(ss) == 5 and not ss in stations:
61 | stations.append(ss)
62 | return stations
63 |
64 | def climateDown(urls, savePath, usrname = '', pwd = '', eachNum = 200, timeout = 5):
65 | count = 1
66 | allcount = len(urls)
67 | for curUrl in urls:
68 | saveName = curUrl.split("/")[-1]
69 | curSavePath = savePath + os.sep + saveName
70 | if count % eachNum == 0:
71 | time.sleep(timeout)
72 | if usrname != '' and pwd != '':
73 | downNASAEarthdata(curUrl, curSavePath, usrname, pwd)
74 | else:
75 | downloadByUrl(curUrl, curSavePath)
76 | print " %d / %d, %s" % (count, allcount, saveName)
77 | count += 1
78 |
79 | def downNASAEarthdata(curUrl, curSavePath, usrname, pwd):
80 | from cookielib import CookieJar
81 | from urllib import urlencode
82 | import urllib2
83 |
84 | # The user credentials that will be used to authenticate access to the data
85 | #
86 | # username = "zhuliangjun"
87 | # password = "Liangjun0130"
88 | #
89 | # # The url of the file we wish to retrieve
90 | #
91 | # url = "http://disc2.gesdisc.eosdis.nasa.gov/data//TRMM_L3/TRMM_3B42_Daily.7/" \
92 | # "2016/10/3B42_Daily.20161019.7.nc4"
93 |
94 | # Create a password manager to deal with the 401 response that is returned from
95 | # Earthdata Login
96 |
97 | password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
98 | password_manager.add_password(None, "https://urs.earthdata.nasa.gov", usrname, pwd)
99 |
100 | # Create a cookie jar for storing cookies. This is used to store and return
101 | # the session cookie given to us by the data server (otherwise it will just
102 | # keep sending us back to Earthdata Login to authenticate). Ideally, we
103 | # should use a file based cookie jar to preserve cookies between runs. This
104 | # will make it much more efficient.
105 |
106 | cookie_jar = CookieJar()
107 |
108 | # Install all the handlers.
109 |
110 | opener = urllib2.build_opener(
111 | urllib2.HTTPBasicAuthHandler(password_manager),
112 | # urllib2.HTTPHandler(debuglevel=1), # Uncomment these two lines to see
113 | # urllib2.HTTPSHandler(debuglevel=1), # details of the requests/responses
114 | urllib2.HTTPCookieProcessor(cookie_jar))
115 | urllib2.install_opener(opener)
116 |
117 | # Create and submit the request. There are a wide range of exceptions that
118 | # can be thrown here, including HTTPError and URLError. These should be
119 | # caught and handled.
120 |
121 | request = urllib2.Request(curUrl)
122 | response = urllib2.urlopen(request)
123 |
124 | # Print out the result (not a good idea with binary data!)
125 |
126 | data = response.read()
127 | with open(curSavePath, "wb") as code:
128 | code.write(data)
129 |
130 | if __name__ == '__main__':
131 | CUR_PATH = currentPath()
132 | CUR_PATH = r'C:\Users\ZhuLJ\Desktop\TRMM_download'
133 | usrname = 'zhuliangjun'
134 | pwd = 'Liangjun0130'
135 | DOWN_PATH = CUR_PATH + os.sep + 'download'
136 | mkdir(DOWN_PATH)
137 | urlTxts = findUrlTxts(CUR_PATH)
138 | urls = ReadUrls(urlTxts)
139 | climateDown(urls, DOWN_PATH, usrname = usrname, pwd = pwd)
140 |
141 |
--------------------------------------------------------------------------------
/Hydrograph/Hydrograph-Storm.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | #import pylab
3 | import datetime,os,sys
4 | import matplotlib.pyplot as plt
5 | from matplotlib.dates import HourLocator, DateFormatter
6 | import time
7 |
8 | def PlotResult(tStart, tEnd, qFile, clr, tOffset=0):
9 | f = open(qFile)
10 | lines = f.readlines()
11 | f.close()
12 |
13 | tlist = []
14 | qlist = []
15 | for line in lines:
16 | items = line.split()
17 | date = datetime.datetime.strptime(items[0]+" "+items[1], '%Y-%m-%d %X')
18 | date = datetime.timedelta(minutes=tOffset) + date
19 | #print date
20 | if date < tStart or date > tEnd:
21 | continue
22 | tlist.append(date)
23 | qlist.append(float(items[2]))
24 | p, = plt.plot_date(tlist, qlist, clr,xdate=True, ydate=False, ls='-', marker='o', linewidth=2.0)
25 | return qlist, p
26 |
27 | def PlotPrec(ax, precFile, tStart, tEnd, clr):
28 | f = open(precFile)
29 | lines = f.readlines()
30 | f.close()
31 | tlist = []
32 | qlist = []
33 | for line in lines:
34 | items = line.split()
35 | startDate = datetime.datetime.strptime(items[0]+" "+items[1], '%Y-%m-%d %X')
36 | endDate = datetime.datetime.strptime(items[0]+" "+items[2], '%Y-%m-%d %X')
37 | if startDate < tStart or endDate > tEnd:
38 | continue
39 | tlist.append(startDate)
40 | tlist.append(startDate)
41 | tlist.append(endDate)
42 | tlist.append(endDate)
43 | qlist.append(0)
44 | qlist.append(float(items[3]))
45 | qlist.append(float(items[3]))
46 | qlist.append(0)
47 |
48 | p, = ax.plot_date(tlist, qlist, clr,xdate=True, ydate=False)
49 | ax.fill(tlist,qlist,'b')
50 | return qlist, p
51 |
52 | def NashCoef(qObs, qSimu):
53 | # Nash-Sutcliffe efficiency: NSE = 1 - sum((Qobs - Qsim)^2) / sum((Qobs - mean(Qobs))^2)
54 | n = min(len(qObs), len(qSimu))
55 | ave = sum(qObs[:n]) / n
56 | a1, a2 = 0.0, 0.0
57 | for i in range(n):
58 | a1 += pow(qObs[i] - qSimu[i], 2)
59 | a2 += pow(qObs[i] - ave, 2)
60 | return 1 - a1 / a2
61 |
62 | def currentPath():
63 | path = sys.path[0]
64 | if os.path.isdir(path):
65 | return path
66 | elif os.path.isfile(path):
67 | return os.path.dirname(path)
68 | if __name__ == '__main__':
69 | year=1988
70 | tStart = datetime.datetime(year, 8, 7, 19)
71 | tEnd = datetime.datetime(year, 8, 8, 19)
72 | baseFolder = currentPath()
73 |
74 | fig, ax = plt.subplots()
75 | fig.autofmt_xdate()
76 | # fig.autofmt_xdate() must be called here, not at the end of the program!
77 | sim_qFile = baseFolder+r'\simuS.txt'
78 | obs_qFile = baseFolder+r'\obsS.txt'
79 | tOffset = 0
80 | qSimu, pSimu = PlotResult(tStart, tEnd, sim_qFile, 'r', tOffset)
81 | qObs, pObs = PlotResult(tStart, tEnd, obs_qFile, 'g', tOffset)
82 | fsize = 16
83 | plt.xlabel(u"Time",fontsize=fsize)
84 | plt.ylabel(u'Discharge(m3/s)',fontsize=fsize)
85 |
86 | plt.legend([pObs, pSimu], ["Observation", "Simulation"], loc=7)
87 | ns = NashCoef(qObs, qSimu)
88 | plt.title("Nash: %.3f" % (ns,))
89 | ax.set_ylim(min(min(qSimu),min(qObs))-10,1.4*max(max(qSimu),max(qObs)))
90 |
91 | ax2 = ax.twinx()
92 | ax2.set_ylabel(r"Precipitation (mm)", fontsize=fsize)
93 | precFile = baseFolder+r'\prec.txt'
94 | precList, precP = PlotPrec(ax2, precFile, tStart, tEnd, 'b')
95 | ax2.set_ylim(4*max(precList),0)
96 |
97 | hours = HourLocator(byhour=range(24),interval=2)
98 | hoursFmt = DateFormatter('%b,%d %Hh')
99 | ax.xaxis.set_major_locator(hours)
100 | ax.xaxis.set_major_formatter(hoursFmt)
101 | ax.autoscale_view()
102 | # ax2.xaxis.set_major_locator(hours)
103 | # ax2.xaxis.set_major_formatter(hoursFmt)
104 | # ax2.autoscale_view()
105 |
106 | plt.grid(True)
107 | plt.show()
108 |
109 | print "Succeed!"
110 |
--------------------------------------------------------------------------------
/Hydrograph/ObsS.txt:
--------------------------------------------------------------------------------
1 | 1988-8-7 20:00:0 0.0004
2 | 1988-8-7 20:20:0 0.06136
3 | 1988-8-7 20:40:0 31.6707
4 | 1988-8-7 20:50:0 132.06
5 | 1988-8-7 21:00:0 203.742
6 | 1988-8-7 21:20:0 254.04
7 | 1988-8-7 21:40:0 121.672
8 | 1988-8-7 21:50:0 37.572
9 | 1988-8-7 22:00:0 7.9666
10 | 1988-8-7 22:30:0 6.8472
11 | 1988-8-7 23:00:0 1.68084
12 | 1988-8-8 01:40:0 0.41377
13 | 1988-8-8 08:00:0 2.5137
14 | 1988-8-8 08:35:0 13.2912
15 | 1988-8-8 09:00:0 21.1523
16 | 1988-8-8 09:15:0 29.7756
17 | 1988-8-8 09:40:0 14.7405
18 | 1988-8-8 12:10:0 0.67691
19 |
--------------------------------------------------------------------------------
/Hydrograph/prec.txt:
--------------------------------------------------------------------------------
1 | 1988-8-7 15:10:00 15:20:00 0.3
2 | 1988-8-7 20:10:00 20:18:00 7.7
3 | 1988-8-7 20:18:00 20:20:00 3
4 | 1988-8-7 20:20:00 20:35:00 2.6
5 | 1988-8-7 20:35:00 20:38:00 4.4
6 | 1988-8-7 20:38:00 20:47:00 10
7 | 1988-8-7 20:47:00 21:00:00 6.9
8 | 1988-8-7 21:00:00 21:10:00 1.1
9 | 1988-8-7 21:10:00 21:14:00 2
10 | 1988-8-7 21:14:00 21:20:00 2.8
11 | 1988-8-7 21:20:00 22:00:00 6
12 | 1988-8-7 22:00:00 23:00:00 7.6
13 | 1988-8-7 23:00:00 23:20:00 0.8
14 | 1988-8-8 1:35:00 2:00:00 0.4
15 | 1988-8-8 2:00:00 2:10:00 0.2
16 | 1988-8-8 2:10:00 2:23:00 2.2
17 | 1988-8-8 2:23:00 2:50:00 3.1
18 | 1988-8-8 3:15:00 4:00:00 2.4
19 | 1988-8-8 4:00:00 5:00:00 5.1
20 | 1988-8-8 5:00:00 5:35:00 0.9
21 | 1988-8-8 7:35:00 8:00:00 0.7
22 | 1988-8-8 8:00:00 8:13:00 7.8
23 | 1988-8-8 8:13:00 8:39:00 10
24 | 1988-8-8 8:39:00 8:40:00 0.7
25 | 1988-8-8 8:40:00 9:00:00 1.5
26 | 1988-8-8 9:00:00 10:00:00 1.2
27 | 1988-8-8 11:55:00 12:10:00 0.2
28 |
--------------------------------------------------------------------------------
/Hydrograph/simuS.txt:
--------------------------------------------------------------------------------
1 | 1988-8-7 20:00:0 0
2 | 1988-8-7 20:20:0 0
3 | 1988-8-7 20:40:0 0.00087
4 | 1988-8-7 20:50:0 59.17112
5 | 1988-8-7 21:00:0 196.16806
6 | 1988-8-7 21:20:0 212.47784
7 | 1988-8-7 21:40:0 83.86438
8 | 1988-8-7 21:50:0 55.98974
9 | 1988-8-7 22:00:0 36.89104
10 | 1988-8-7 22:30:0 5.05639
11 | 1988-8-7 23:00:0 0.82169
12 | 1988-8-8 01:40:0 0.00002
13 | 1988-8-8 08:00:0 0
14 | 1988-8-8 08:35:0 0.08759
15 | 1988-8-8 09:00:0 2.43216
16 | 1988-8-8 09:15:0 11.56709
17 | 1988-8-8 09:40:0 45.27423
18 | 1988-8-8 12:10:0 0.03099
19 |
--------------------------------------------------------------------------------
/NSGA2/.idea/NSGA2.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/NSGA2/.idea/encodings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/NSGA2/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/NSGA2/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/NSGA2/.idea/workspace.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/NSGA2/deap/dbf_test.py:
--------------------------------------------------------------------------------
1 | import csv
2 |
3 | from dbfread import DBF
4 |
5 | if __name__ == '__main__':
6 | dbff = r'C:\Users\ZhuLJ\Desktop\test\aug2.DBF'
7 | csvf = r'C:\Users\ZhuLJ\Desktop\test\aug2.csv'
8 | table = DBF(dbff)
9 | f = open(csvf, 'w')
10 | writerobj = csv.writer(f, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
11 | header = [v.encode('utf-8') for v in table.field_names]
12 | writerobj.writerow(header)
13 | for record in table:
14 | currec = list()
15 | for v in record.values():
16 | if isinstance(v, unicode):
17 | v = v.encode('utf-8')
18 | elif v is None:
19 | v = ''
20 | else:
21 | v = str(v)
22 | if len(v) > 8 and (v[0:2] == '14' or v[0:2] == '13' or v[0:2] == '622'):
23 | v = '\'%s' % v
24 | currec.append(v)
25 | writerobj.writerow(currec)
26 | f.close()
27 |
--------------------------------------------------------------------------------
/NSGA2/deap/demo1.py:
--------------------------------------------------------------------------------
1 | # Overview of DEAP document.
2 | import pickle
3 | import random
4 |
5 | import numpy
6 | from deap import base, creator
7 | from deap import tools
8 |
9 | # 1. Types
10 |
11 | # Create a class named 'FitnessMin' that inherits from base.Fitness and has a 'weights' attribute
12 | creator.create('FitnessMin', base.Fitness, weights=(1.0,))  # note: a positive weight means maximization in DEAP; use (-1.0,) to minimize
13 | creator.create('Individual', list, fitness=creator.FitnessMin)
14 |
15 | # Another example of creating a class and initializing an object.
16 | creator.create("Foo", list, bar=dict, spam=1)
17 | x = creator.Foo()
18 | print x.bar, x.spam # {} 1
19 |
20 | # 2. Initialization
21 | IND_SIZE = 10
22 | toolbox = base.Toolbox()
23 |
24 |
25 | # example of how to use Toolbox
26 | def func(a, b, c=3):
27 | print a, b, c
28 |
29 |
30 | toolbox.register('myFunc', func, 2, c=4)
31 | toolbox.register('myFunc2', func)
32 | toolbox.myFunc(3)  # prints 2 3 4; registering and then calling is equivalent to func(2, 3, 4)
33 | toolbox.myFunc2(2, 3, 4)
34 |
35 |
36 | class initParam(object):
37 | """Test."""
38 |
39 | def __init__(self, v):
40 | print ('initial, v: %f' % v)
41 | self.multiply = v
42 | self.fid = random.random()
43 | @staticmethod
44 | def get_random(v):
45 | cc = initParam(v)
46 | l = list()
47 | for i in range(10):
48 | l.append(random.random() * cc.multiply)
49 | print ('get_random, v: %s' % ','.join(str(i) for i in l))
50 | return l
51 |
52 |
53 | def initRepeatWithCfg(container, generator, cf, n=2):
54 | return container(generator(cf) for _ in xrange(n))
55 |
56 |
57 | def initIterateWithCfg(container, generator, cf):
58 | return container(generator(cf))
59 |
60 |
61 | toolbox.register('attribute', initParam.get_random)
62 | # toolbox.register('individual', initRepeatWithCfg, creator.Individual,
63 | # toolbox.attribute, n=IND_SIZE)
64 | toolbox.register('individual', initIterateWithCfg, creator.Individual, toolbox.attribute)
65 | toolbox.register('population', initRepeatWithCfg, list, toolbox.individual)
66 |
67 |
68 | # 3. Operators
69 | def evaluate(individual, n):
70 | return sum(individual) / n
71 |
72 |
73 | toolbox.register('mate', tools.cxTwoPoint)
74 | toolbox.register('mutate', tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
75 | toolbox.register('select', tools.selTournament, tournsize=3)
76 | toolbox.register('evaluate', evaluate)
77 |
78 | stats = tools.Statistics(key=lambda ind: ind.fitness.values)
79 | stats.register('avg', numpy.mean, axis=0)
80 | stats.register('std', numpy.std, axis=0)
81 | stats.register('min', numpy.min, axis=0)
82 | stats.register('max', numpy.max, axis=0)
83 |
84 | logbook = tools.Logbook()
85 |
86 |
87 | def main():
88 | cc = initParam(0.8)
89 | pop = toolbox.population(0.8, n=50)
90 |
91 | CXPB, MUTPB, NGEN = 0.5, 0.2, 40
92 |
93 | # evaluate the entire population
94 | fitnesses = map(toolbox.evaluate, pop, [9]*50)
95 | print len(fitnesses) # 50
96 | for ind, fit in zip(pop, fitnesses):
97 | ind.fitness.values = (fit,)
98 |
99 | for g in range(NGEN):
100 | # select the next generation individuals
101 | offspring = toolbox.select(pop, len(pop))
102 | # clone the selected individuals
103 | offspring = map(toolbox.clone, offspring)
104 | # Apply crossover and mutation on the offspring
105 | for child1, child2 in zip(offspring[::2], offspring[1::2]):
106 | if random.random() < CXPB:
107 | toolbox.mate(child1, child2)
108 | del child1.fitness.values
109 | del child2.fitness.values
110 | for mutant in offspring:
111 | if random.random() < MUTPB:
112 | toolbox.mutate(mutant)
113 | del mutant.fitness.values
114 |
115 | # Evaluate the individuals with an invalid fitness
116 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
117 | fitnesses = map(toolbox.evaluate, invalid_ind, [9]*len(invalid_ind))
118 | for ind, fit in zip(invalid_ind, fitnesses):
119 | ind.fitness.values = (fit,)
120 |
121 | # The population is entirely replaced by the offspring
122 | pop[:] = offspring
123 | record = stats.compile(pop)
124 | # print record
125 | logbook.record(gen=g, **record)
126 | return pop, logbook
127 |
128 |
129 | if __name__ == '__main__':
130 | main()
131 | logbook.header = 'gen', 'avg'
132 | print logbook
133 | gen = logbook.select('gen')
134 | fit_maxs = logbook.select('max')
135 | # import matplotlib.pyplot as plt
136 | # fig, ax1 = plt.subplots()
137 | # line1 = ax1.plot(gen, fit_maxs, 'b', label='Maximum fitness')
138 | # ax1.set_xlabel('Generation')
139 | # ax1.set_ylabel('Fitness', color='b')
140 | # for t1 in ax1.get_yticklabels():
141 | # t1.set_color('b')
142 | # labs = [l.get_label() for l in line1]
143 | # ax1.legend(line1, labs, loc='center right')
144 | # plt.show()
145 |
146 | # output logbook
147 | f = open(r'D:\tmp\logbook.txt', 'w')
148 | f.write(logbook.__str__())
149 | f.close()
150 |
--------------------------------------------------------------------------------
/NSGA2/inspyred/nsga_example_inspyred.py:
--------------------------------------------------------------------------------
1 | from random import Random
2 | from time import time
3 | import inspyred
4 | import matplotlib.pyplot as plt
5 |
6 | def main(prng=None, display=False):
7 | if prng is None:
8 | prng = Random()
9 | prng.seed(time())
10 |
11 | problem = inspyred.benchmarks.Kursawe(3)
12 | ea = inspyred.ec.emo.NSGA2(prng)
13 | ea.variator = [inspyred.ec.variators.blend_crossover,
14 | inspyred.ec.variators.gaussian_mutation]
15 | ea.terminator = inspyred.ec.terminators.generation_termination
16 | final_pop = ea.evolve(generator=problem.generator,
17 | evaluator=problem.evaluator,
18 | pop_size=100,
19 | maximize=problem.maximize,
20 | bounder=problem.bounder,
21 | max_generations=80)
22 |
23 | if display:
24 | final_arc = ea.archive
25 | print('Best Solutions: \n')
26 | for f in final_arc:
27 | print(f)
28 | import matplotlib.pyplot as plt
29 | x = []
30 | y = []
31 | for f in final_arc:
32 | x.append(f.fitness[0])
33 | y.append(f.fitness[1])
34 | plt.scatter(x, y, color='b')
35 | # plt.savefig('{0} Example ({1}).pdf'.format(ea.__class__.__name__,
36 | # problem.__class__.__name__),
37 | # format='pdf')
38 | plt.show()
39 | return ea
40 |
41 | if __name__ == '__main__':
42 | main(display=True)
43 |
--------------------------------------------------------------------------------
/NSGA2/inspyred/parallel_evaluation_pp_example.py:
--------------------------------------------------------------------------------
1 | from random import Random
2 | from time import time
3 | import inspyred
4 | import math
5 |
6 | # Define an additional "necessary" function for the evaluator
7 | # to see how it must be handled when using pp.
8 | def my_squaring_function(x):
9 | return x**2
10 |
11 | def generate_rastrigin(random, args):
12 | size = args.get('num_inputs', 10)
13 | return [random.uniform(-5.12, 5.12) for i in range(size)]
14 |
15 | def evaluate_rastrigin(candidates, args):
16 | fitness = []
17 | for cs in candidates:
18 | fit = 10 * len(cs) + sum([(my_squaring_function(x - 1) -
19 | 10 * math.cos(2 * math.pi * (x - 1)))
20 | for x in cs])
21 | fitness.append(fit)
22 | return fitness
23 |
24 | def main(prng=None, display=False):
25 | if prng is None:
26 | prng = Random()
27 | prng.seed(time())
28 |
29 | ea = inspyred.ec.DEA(prng)
30 | if display:
31 | ea.observer = inspyred.ec.observers.stats_observer
32 | ea.terminator = inspyred.ec.terminators.evaluation_termination
33 | final_pop = ea.evolve(generator=generate_rastrigin,
34 | evaluator=inspyred.ec.evaluators.parallel_evaluation_pp,
35 | pp_evaluator=evaluate_rastrigin,
36 | pp_dependencies=(my_squaring_function,),
37 | pp_modules=("math",),
38 | pop_size=8,
39 | bounder=inspyred.ec.Bounder(-5.12, 5.12),
40 | maximize=False,
41 | max_evaluations=256,
42 | num_inputs=3)
43 |
44 | if display:
45 | best = max(final_pop)
46 | print('Best Solution: \n{0}'.format(str(best)))
47 | return ea
48 |
49 | if __name__ == '__main__':
50 | main(display=True)
51 |
--------------------------------------------------------------------------------
/NSGA2/nsga_example.py:
--------------------------------------------------------------------------------
1 | from random import Random
2 | from time import time
3 | import inspyred
4 |
5 | def main(prng=None, display=False):
6 | if prng is None:
7 | prng = Random()
8 | prng.seed(time())
9 |
10 | problem = inspyred.benchmarks.Kursawe(3)
11 | ea = inspyred.ec.emo.NSGA2(prng)
12 | ea.variator = [inspyred.ec.variators.blend_crossover,
13 | inspyred.ec.variators.gaussian_mutation]
14 | ea.terminator = inspyred.ec.terminators.generation_termination
15 | final_pop = ea.evolve(generator=problem.generator,
16 | evaluator=problem.evaluator,
17 | pop_size=100,
18 | maximize=problem.maximize,
19 | bounder=problem.bounder,
20 | max_generations=80)
21 |
22 | if display:
23 | final_arc = ea.archive
24 | print('Best Solutions: \n')
25 | for f in final_arc:
26 | print(f)
27 | import matplotlib.pyplot as plt
28 | x = []
29 | y = []
30 | for f in final_arc:
31 | x.append(f.fitness[0])
32 | y.append(f.fitness[1])
33 | plt.scatter(x, y, color='b')
34 | # plt.savefig('{0} Example ({1}).pdf'.format(ea.__class__.__name__,
35 | # problem.__class__.__name__),
36 | # format='pdf')
37 | plt.show()
38 | return ea
39 |
40 | if __name__ == '__main__':
41 | main(display=True)
42 |
--------------------------------------------------------------------------------
/NSGA2/parallel_evaluation_pp_example.py:
--------------------------------------------------------------------------------
1 | from random import Random
2 | from time import time
3 | import inspyred
4 | import math
5 |
6 | # Define an additional "necessary" function for the evaluator
7 | # to see how it must be handled when using pp.
8 | def my_squaring_function(x):
9 | return x**2
10 |
11 | def generate_rastrigin(random, args):
12 | size = args.get('num_inputs', 10)
13 | return [random.uniform(-5.12, 5.12) for i in range(size)]
14 |
15 | def evaluate_rastrigin(candidates, args):
16 | fitness = []
17 | for cs in candidates:
18 | fit = 10 * len(cs) + sum([(my_squaring_function(x - 1) -
19 | 10 * math.cos(2 * math.pi * (x - 1)))
20 | for x in cs])
21 | fitness.append(fit)
22 | return fitness
23 |
24 | def main(prng=None, display=False):
25 | if prng is None:
26 | prng = Random()
27 | prng.seed(time())
28 |
29 | ea = inspyred.ec.DEA(prng)
30 | if display:
31 | ea.observer = inspyred.ec.observers.stats_observer
32 | ea.terminator = inspyred.ec.terminators.evaluation_termination
33 | final_pop = ea.evolve(generator=generate_rastrigin,
34 | evaluator=inspyred.ec.evaluators.parallel_evaluation_pp,
35 | pp_evaluator=evaluate_rastrigin,
36 | pp_dependencies=(my_squaring_function,),
37 | pp_modules=("math",),
38 | pop_size=8,
39 | bounder=inspyred.ec.Bounder(-5.12, 5.12),
40 | maximize=False,
41 | max_evaluations=256,
42 | num_inputs=3)
43 |
44 | if display:
45 | best = max(final_pop)
46 | print('Best Solution: \n{0}'.format(str(best)))
47 | return ea
48 |
49 | if __name__ == '__main__':
50 | main(display=True)
51 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Packages: GeoPy
2 | Author: Liangjun Zhu
3 | Email: zlj@lreis.ac.cn
4 |
5 | If you have come across this repository, it is truly my pleasure, and I hope some of this code can be of help to you.
6 | In fact, this repository is a collection of my Python studies, mostly based on Arcpy (ArcGIS 9.3.x ~ 10.x). Every single .py file or folder works independently.
7 | Next, I will try my best to provide clear instructions for each function.
8 |
9 | 1. MultiValue2Zones
10 |    This script computes statistics of the given rasters within the zones of a
11 |    polygon shapefile and reports the results to a CSV file (a minimal sketch is
12 |    given after this list). The supported statistics are "MEAN", "MAJORITY",
13 |    "MAXIMUM", "MEDIAN", "MINIMUM", "MINORITY", "RANGE", "STD", "SUM", and
14 |    "VARIETY". Each raster's statistic is appended to the original shapefile's
15 |    attribute table in a field named after the corresponding raster.
16 |
17 | 2. AddNearAtrributesDirections
18 |    This script identifies the polygons surrounding each polygon and adds
19 |    attributes recording their relative directions. For detailed instructions and
20 |    usage, please see http://bbs.esrichina-bj.cn/ESRI/viewthread.php?tid=126293&extra=&page=1 ,
21 |    http://bbs.esrichina-bj.cn/ESRI/viewthread.php?tid=60835&extra=&page=1 and
22 |    http://bbs.esrichina-bj.cn/ESRI/viewthread.php?tid=85765.
23 |
24 | 3. CSV2PtsShp
25 |    A very simple but useful script that converts point coordinates in a .CSV file
26 |    to an ESRI shapefile (see the second sketch after this list).
27 |
28 | 4. RUSLE_LS
29 |    Calculates the LS factor from DEM data according to RUSLE-based criteria.
30 |    -- RUSLE_LS_4_PC.AML is an AML script for ArcInfo Workstation; the code is
31 |       originally from Rick D. Van Remortel et al.
32 |    -- RUSLE_LS(Tool).py is a Python version that accomplishes the same function,
33 |       based on ArcGIS 9.3.
34 |    My advice: if your data is large, please use the AML code, since the Python
35 |    version is less efficient.
36 |
37 | 5. Hydrograph
38 |    This script is based on matplotlib. Download the script and the test data,
39 |    and you can plot the hydrograph.
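
Below are two minimal, illustrative sketches of the ideas in items 1 and 3 above.
They are not the shipped scripts; the file paths, field names, and column names in
them are placeholder assumptions.

A zonal-statistics sketch in the spirit of MultiValue2Zones, assuming the arcpy
Spatial Analyst extension is available:

```python
# Sketch only: one statistic per raster over polygon zones (hypothetical paths).
import arcpy
from arcpy.sa import ZonalStatisticsAsTable

arcpy.CheckOutExtension("Spatial")
for ras in ["slope.tif", "ndvi.tif"]:  # hypothetical value rasters
    out_table = ras.split(".")[0] + "_stats.dbf"
    # "FID" as the zone field and "MEAN" as the statistic are example choices
    ZonalStatisticsAsTable("zones.shp", "FID", ras, out_table, "DATA", "MEAN")
```

A CSV-to-point-shapefile sketch in the spirit of CSV2PtsShp, assuming GDAL/OGR is
installed and the CSV has a header row with 'x' and 'y' columns:

```python
# Sketch only: write one point feature per CSV row (column names are assumptions).
import csv
from osgeo import ogr

def csv2pts(csv_file, shp_file):
    driver = ogr.GetDriverByName("ESRI Shapefile")
    ds = driver.CreateDataSource(shp_file)
    layer = ds.CreateLayer("points", geom_type=ogr.wkbPoint)
    with open(csv_file) as f:
        for row in csv.DictReader(f):
            feat = ogr.Feature(layer.GetLayerDefn())
            pt = ogr.Geometry(ogr.wkbPoint)
            pt.AddPoint(float(row["x"]), float(row["y"]))
            feat.SetGeometry(pt)
            layer.CreateFeature(feat)
            feat.Destroy()
    ds.Destroy()
```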
--------------------------------------------------------------------------------
/RUSLE_LS/RUSLE.tbx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/RUSLE_LS/RUSLE.tbx
--------------------------------------------------------------------------------
/RUSLE_LS/RUSLE_LS(Tool).py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/RUSLE_LS/RUSLE_LS(Tool).py
--------------------------------------------------------------------------------
/RillPy/Hillslope.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | from Util import *
3 | import numpy
4 |
5 | ## Functions for Hillslope Delineating ##
6 | def isFirstStreamCell(StreamRaster, nodata, row, col, flow_dir):  # True if a stream cell has no upstream stream cell (i.e., a headwater cell)
7 | nrows,ncols = StreamRaster.shape
8 | if(StreamRaster[row][col] == nodata):
9 | return False
10 | else:
11 | for di in [-1,0,1]:
12 | for dj in [-1,0,1]:
13 | ni = row + di
14 | nj = col + dj
15 | if ni < 0 or nj < 0 or ni >= nrows or nj >= ncols or flow_dir[ni][nj] <=0:
16 | continue
17 | if downstream_index(flow_dir[ni][nj], ni, nj) == (row,col) and (StreamRaster[ni][nj] != nodata):
18 | return False
19 | return True
20 | def isStreamSegmentCell(StreamRaster, nodata, row, col, flow_dir):
21 | ## Returns 1 for the first cell of a stream segment, 2 for the last cell, 3 for a middle cell, and 0 for non-stream cells.
22 | nrows,ncols = StreamRaster.shape
23 | count = 0
24 | if StreamRaster[row][col] == nodata:
25 | return 0
26 | else:
27 | for di in [-1,0,1]:
28 | for dj in [-1,0,1]:
29 | ni = row + di
30 | nj = col + dj
31 | if ni < 0 or nj < 0 or ni >= nrows or nj >= ncols or flow_dir[ni][nj] <=0:
32 | continue
33 | if downstream_index(flow_dir[ni][nj], ni, nj) == (row,col) and (StreamRaster[ni][nj] == StreamRaster[row][col]):
34 | count = count + 1
35 | if count >= 1:
36 | idx = downstream_index(flow_dir[row][col], row, col)
37 | if idx[0] >= nrows or idx[1] >= ncols or idx[0] <0 or idx[1] < 0 or StreamRaster[idx[0]][idx[1]] == nodata:
38 | return 2
39 | else:
40 | return 3
41 | else:
42 | return 1
43 | def GetRillStartIdx(StreamLinks,nodata,FlowDir):
44 | # Get first cell index of each rill
45 | nrows,ncols = StreamLinks.shape
46 | countRill = 0
47 | countmid = 0
48 | countend = 0
49 | RillStartIdx = []
50 | for i in range(nrows):
51 | for j in range(ncols):
52 | if (isStreamSegmentCell(StreamLinks,nodata,i,j,FlowDir) == 1):
53 | countRill = countRill + 1
54 | RillStartIdx.append((i,j))
55 | elif (isStreamSegmentCell(StreamLinks,nodata,i,j,FlowDir) == 3):
56 | countmid = countmid + 1
57 | elif (isStreamSegmentCell(StreamLinks,nodata,i,j,FlowDir) == 2):
58 | countend = countend + 1
59 |
60 | #print "Rill number is : %s,%s,%s" % (countRill,countmid,countend)
61 | return RillStartIdx
62 |
63 | def fillUpstreamCells(flow_dir,stream,nodata,hillslp,value,row,col):  # recursively mark all upstream non-stream cells with the given value
64 | nrows,ncols = flow_dir.shape
65 | for di in [-1,0,1]:
66 | for dj in [-1,0,1]:
67 | tempRow = di + row
68 | tempCol = dj + col
69 | if tempRow < 0 or tempCol < 0 or tempRow >= nrows or tempCol >= ncols:
70 | continue
71 | if downstream_index(flow_dir[tempRow][tempCol],tempRow,tempCol)==(row,col) and stream[tempRow][tempCol] == nodata:
72 | if hillslp[tempRow][tempCol] != 1:
73 | hillslp[tempRow][tempCol] = value
74 | #print tempRow,tempCol
75 | fillUpstreamCells(flow_dir,stream,nodata,hillslp,value,tempRow,tempCol)
76 |
77 | def DelineateHillslopes(StreamFile,FlowDirFile,HillslpFile):
78 | print "Delineating hillslopes (header, left, and right hillslope)..."
79 | StreamLinks = ReadRaster(StreamFile).data
80 | nodata = ReadRaster(StreamFile).noDataValue
81 | geotrans = ReadRaster(StreamFile).geotrans
82 | FlowDir = ReadRaster(FlowDirFile).data
83 | nrows,ncols = StreamLinks.shape
84 | count = 0
85 | SourcePtsIdx = []
86 | for i in range(nrows):
87 | for j in range(ncols):
88 | if(isFirstStreamCell(StreamLinks,nodata,i,j,FlowDir)):
89 | count = count +1
90 | SourcePtsIdx.append((i,j))
91 |
92 | #print "Headwater point:%s" % count
93 | #test = GetRillStartIdx(StreamLinks,nodata,FlowDir)
94 | HillslopeMtx = numpy.ones((nrows,ncols))
95 | if nodata != -9999:
96 | HillslopeMtx = HillslopeMtx * -9999
97 | else:
98 | HillslopeMtx = HillslopeMtx * nodata
99 | for SourcePt in SourcePtsIdx:
100 | #print SourcePt
101 | cRow,cCol = SourcePt
102 | for di in [-1,0,1]:
103 | for dj in [-1,0,1]:
104 | ci = cRow + di
105 | cj = cCol + dj
106 | if ci < 0 or cj < 0 or ci >= nrows or cj >= ncols:
107 | continue
108 | if downstream_index(FlowDir[ci][cj],ci,cj)==(cRow,cCol):
109 | HillslopeMtx[ci][cj] = 0
110 | fillUpstreamCells(FlowDir,StreamLinks,nodata,HillslopeMtx,0,ci,cj)
111 | previous = SourcePt
112 | current = downstream_index(FlowDir[cRow][cCol],cRow,cCol)
113 |
114 | while not(current[0] < 0 or current[1] < 0 or current[0] >= nrows or current[1] >= ncols):
115 | CurRow = current[0]
116 | CurCol = current[1]
117 | StreamLinkValue = StreamLinks[CurRow][CurCol]
118 | DirIdx = DIR_VALUES.index(FlowDir[CurRow][CurCol])
119 | if DirIdx <= 7:
120 | Clockwise = range(DirIdx + 1, 8)
121 | for i in range(DirIdx):
122 | Clockwise.append(i)
123 | CounterClock = list(reversed(Clockwise))
124 | if isStreamSegmentCell(StreamLinks,nodata,CurRow,CurCol,FlowDir) == 1:
125 | Clockwise = Clockwise[0:4]
126 | CounterClock = CounterClock[0:4]
127 | if isStreamSegmentCell(StreamLinks,nodata,CurRow,CurCol,FlowDir) == 2:
128 | DirIdx = DIR_VALUES.index(FlowDir[previous[0]][previous[1]])
129 | Clockwise = range(DirIdx + 1, 8)
130 | for i in range(DirIdx):
131 | Clockwise.append(i)
132 | CounterClock = list(reversed(Clockwise))
133 | Clockwise = Clockwise[0:4]
134 | CounterClock = CounterClock[0:4]
135 | for Dir in Clockwise:
136 | temprow = CurRow + DIR_ITEMS[DIR_VALUES[Dir]][0]
137 | tempcol = CurCol + DIR_ITEMS[DIR_VALUES[Dir]][1]
138 | if temprow < 0 or tempcol < 0 or temprow >= nrows or tempcol >= ncols:
139 | continue
140 | if downstream_index(FlowDir[temprow][tempcol],temprow,tempcol) == (CurRow,CurCol):
141 | if StreamLinks[temprow][tempcol] == StreamLinkValue:
142 | break
143 | elif StreamLinks[temprow][tempcol] != nodata:
144 | continue
145 | else:
146 | HillslopeMtx[temprow][tempcol] = 1
147 | fillUpstreamCells(FlowDir,StreamLinks,nodata,HillslopeMtx,1,temprow,tempcol)
148 | for Dir in CounterClock:
149 | temprow = CurRow + DIR_ITEMS[DIR_VALUES[Dir]][0]
150 | tempcol = CurCol + DIR_ITEMS[DIR_VALUES[Dir]][1]
151 | if temprow < 0 or tempcol < 0 or temprow >= nrows or tempcol >= ncols:
152 | continue
153 | if downstream_index(FlowDir[temprow][tempcol],temprow,tempcol) == (CurRow,CurCol):
154 | if StreamLinks[temprow][tempcol] == StreamLinkValue:
155 | break
156 | elif StreamLinks[temprow][tempcol] != nodata:
157 | continue
158 | elif HillslopeMtx[temprow][tempcol] != 1:
159 | HillslopeMtx[temprow][tempcol] = 2
160 | fillUpstreamCells(FlowDir,StreamLinks,nodata,HillslopeMtx,2,temprow,tempcol)
161 | previous = current
162 | current = downstream_index(FlowDir[CurRow][CurCol],CurRow,CurCol)
163 | WriteAscFile(HillslpFile, HillslopeMtx,ncols,nrows,geotrans,-9999)
164 |
165 |
166 |
167 | if __name__=='__main__':
168 | streamf = r'C:\Users\ZhuLJ\Desktop\test\stream'
169 | flowdirf = r'C:\Users\ZhuLJ\Desktop\test\grid_fdir_1'
170 | hillslpf = r'C:\Users\ZhuLJ\Desktop\test\hillslope_test.asc'
171 | DelineateHillslopes(streamf, flowdirf, hillslpf)
--------------------------------------------------------------------------------
/RillPy/Memo.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | ## Functional test!
3 | ##UpStreamShp = RillExtDir + os.sep + "UpStream.shp"
4 | ##arcpy.CreateFeatureclass_management(RillExtDir, "UpStream.shp", "POLYLINE", "", "DISABLED", "DISABLED", "")
5 | ##arcpy.Append_management(["north.shp", "south.shp", "east.shp", "west.shp"], UpStreamShp, "NO_TEST","","")
6 | import math,copy
7 | #Elev = [398.64911,395.37039,389.93884,382.65137,375.08615,368.56583,365.2388,363.12885,362.1973,361.80881]
8 | #Elev = [383.33521,381.29871,377.68607,372.73752,366.91272,361.18701,356.59479,353.28427,349.79819,347.04926,344.62747,343.27286,341.56818,339.35349,335.77808,330.39804,323.68604,317.19299,312.7785,310.86328,310.17453,308.9617,308.42947,308.30804]
9 | #
10 | #Length = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
11 | #k = []
12 | #k2 = []
13 | #for i in range(1,len(Elev)):
14 | # #print Elev[i]
15 | # tempk = math.atan((Elev[i]-Elev[i-1])/(Length[i]-Length[i-1]))*180./math.pi
16 | # if tempk < 0:
17 | # tempk = 180 + tempk
18 | # k.append(tempk)
19 | #print k
20 | #for i in range(1,len(k)):
21 | # tempk2 = math.atan((k[i]-k[i-1])/(Length[i+1]-Length[i]))*180./math.pi
22 | # if tempk2 < 0:
23 | # tempk2 = 180 + tempk2
24 | # k2.append(tempk2)
25 | #print k2
26 | #print len(k2),k2.index(max(k2))
27 | #curRouteSOS = [57.239685,68.406319,60.301327,44.899513,57.68351,61.255352,28.919455,31.739635,47.483128,70.880402,74.572784,70.896515,46.440041]
28 | #curRouteSlp = [6.942997,17.525024,25.871187,29.097631,33.821583,45.065792,52.355793,51.575272,47.721035,38.855087,26.716295,13.982291,6.188683]
29 | #curRouteElev = [405.30917,404.47617,402.88577,400.73474,397.63855,393.18539,386.34515,378.82489,372.15216,366.53317,364.11539,362.88748,362.13895]
30 | #lowerMaxSOS = max(curRouteSOS) * 0.9 #- 0.05 * (max(curRouteSOS) - min(curRouteSOS))
31 | #MaxSlpIdx = curRouteSlp.index(max(curRouteSlp))
32 | #MaxSOSIdx = curRouteSOS.index(max(curRouteSOS))
33 | #temp = copy.copy(curRouteSOS)
34 | #temp.sort()
35 | #SecSOSIdx = curRouteSOS.index(temp[len(temp)-2])
36 | #EdgeIdx = 0
37 | #if MaxSlpIdx >= min(MaxSOSIdx,SecSOSIdx) and MaxSlpIdx <= max(MaxSOSIdx,SecSOSIdx):
38 | # for i in range(min(MaxSOSIdx,SecSOSIdx)+1): #,max(MaxSOSIdx,SecSOSIdx)):
39 | # if curRouteSlp[i] >= 20:
40 | # EdgeIdx = i
41 | # break
42 | #for i in range(9):
43 | # if curRouteSOS[i] >= lowerMaxSOS and curRouteSlp[i] >= 20:
44 | # if EdgeIdx != 0:
45 | # EdgeIdx = min(EdgeIdx, i)
46 | # else:
47 | # EdgeIdx = i
48 | # print EdgeIdx
49 | # break
50 |
51 | #MaxSOSIdx = curRouteSOS.index(max(curRouteSOS))
52 | #tempSOS = copy.copy(curRouteSOS)
53 | #tempSOS.sort()
54 | #SecSOSIdx = curRouteSOS.index(tempSOS[len(tempSOS)-2])
55 | #if len(curRouteElev) > 3:
56 | # if MaxSOSIdx in range(len(curRouteElev)-3,len(curRouteElev)):
57 | # MaxSOSIdx = curRouteSOS.index(tempSOS[len(tempSOS)-3])
58 | # SecSOSIdx = curRouteSOS.index(tempSOS[len(tempSOS)-2])
59 | #
60 | #lowerMaxSOS = curRouteSOS[MaxSOSIdx] * 0.9 #- 0.05 * (max(curRouteSOS) - min(curRouteSOS))
61 | #MaxSlpIdx = curRouteSlp.index(max(curRouteSlp))
62 | #EdgeIdx = 9999
63 | #if MaxSlpIdx >= min(MaxSOSIdx,SecSOSIdx) and MaxSlpIdx <= max(MaxSOSIdx,SecSOSIdx):
64 | # for i in range(min(MaxSOSIdx,SecSOSIdx)+1): #,max(MaxSOSIdx,SecSOSIdx)):
65 | # if curRouteSlp[i] >= 20:
66 | # EdgeIdx = i
67 | # break
68 | #for i in range(11):
69 | # if curRouteSOS[i] >= lowerMaxSOS and curRouteSlp[i] >= 20:
70 | # if EdgeIdx != 9999:
71 | # EdgeIdx = min(EdgeIdx, i)
72 | # break
73 | # else:
74 | # EdgeIdx = i
75 | # break
76 | #print EdgeIdx
77 |
78 |
79 | lists = [[[55, 62], [56, 62], [57, 62], [58, 62], [59, 63], [60, 64]], [[58, 63], [59, 64], [60, 64]], [[57, 63], [58, 64], [59, 64], [60, 64]], [[56, 63], [57, 64], [58, 64], [59, 64], [60, 64]], [[51, 59], [52, 60], [53, 61], [54, 62], [55, 63], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]], [[43, 60], [44, 60], [45, 60], [46, 60], [47, 60], [48, 60], [49, 60], [50, 60], [51, 60], [52, 61], [53, 62], [54, 63], [55, 64], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]], [[45, 61], [46, 61], [47, 61], [48, 61], [49, 60], [50, 60], [51, 60], [52, 61], [53, 62], [54, 63], [55, 64], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]], [[37, 60], [38, 60], [39, 61], [40, 61], [41, 61], [42, 61], [43, 61], [44, 61], [45, 62], [46, 62], [47, 62], [48, 61], [49, 60], [50, 60], [51, 60], [52, 61], [53, 62], [54, 63], [55, 64], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]]]
80 | #print lists
81 | f = open(r'e:\test.txt','w')
82 | for list in lists:
83 | ##print list
84 | f.write(str(list))
85 | f.write('\n')
86 | f.close()
87 | count = 0
88 | for line in open(r'e:\test.txt'):
89 | count = count + 1
90 | s = eval(line)
91 | print len(s)
92 | print count
--------------------------------------------------------------------------------
/RillPy/README.txt:
--------------------------------------------------------------------------------
1 | RillPy is a tool for rill extraction and the calculation of rill morphological characteristics, based on Arcpy, gdal, Scipy, etc.
2 |
3 | The whole tool consists of several modules:
4 | --Util.py Some fundamental functions.
5 | --Subbasin.py Subbasin delineation functions.
6 | --Hillslope.py Hillslope delineation functions.
7 | --Rill.py
8 | --ShoulderLine.py
9 | --main.py Configure the whole tool and make the entrance.
10 | The functions are detailed as follows.
11 | --Util
12 | ---currentPath()
13 | ---makeResultFolder(rootdir)
14 | ---downstream_index(DIR_VALUE, i, j)
15 | ---ReadRaster(rasterFile)
16 | ---WriteAscFile(filename, data, xsize, ysize, geotransform, noDataValue)
17 | ---WriteGTiffFile(filename, nRows, nCols, data, geotransform, srs, noDataValue, gdalType)
18 | ---WriteGTiffFileByMask(filename, data, mask, gdalType)
19 | ---NashCoef(qObs, qSimu)
20 | ---RMSE(list1, list2)
21 | ---StdEv(list1)
22 | ---UtilHydroFiles(DEMsrc, PreprocessDir)
23 | ---RemoveLessPts(RasterFile,num,OutputRaster)
24 |
25 | --Subbasin
26 | ---GenerateStreamNetByTHR(DEMbuf,FlowDirFile,FlowAccFile,threshold,folder)
27 | ---GenerateWatershedByStream
28 | ---RillIndexCalc(DEMbuf,StreamOrder)
29 | ---
30 | --Hillslope
31 | ---isFirstStreamCell(StreamRaster, nodata, row, col, flow_dir)
32 | ---isStreamSegmentCell(StreamRaster, nodata, row, col, flow_dir)
33 | ---GetRillStartIdx(StreamLinks,nodata,FlowDir)
34 | ---fillUpstreamCells(flow_dir,stream,nodata,hillslp,value,row,col)
35 | ---DelineateHillslopes(StreamFile,FlowDirFile,HillslpFile)
36 | ---
37 | --Rill
38 | ---IdentifyRillRidges(HillslpFile,StreamFile,FlowDirFile,FlowAccFile,WatershedFile,DEMfil,folder)
39 | ---
40 | In main.py, there are several parameters:
41 | --DEMsrc DEM source in which rill erosion occurs.
42 | --rootdir The result folder. If the folder does not exist, it will be created; if rootdir
43 | is "", the result folder will be created in the current folder and named "RillPyResults".
44 | In the rootdir, four folders will be created:
45 | 0Temp, 1Preprocess, 2Rill, 3Stats
46 | --streamTHR Threshold for the initial stream-link and subbasin extraction.
47 | If streamTHR = 0, the program will set the threshold to 1% of the
48 | accumulation by default; if 0 < streamTHR < 1, the threshold is streamTHR*accum;
49 | else if streamTHR > 1, streamTHR itself is the threshold (see the sketch below).
--------------------------------------------------------------------------------
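To make the streamTHR convention above concrete, here is a minimal sketch of the threshold resolution the README describes (resolve_threshold and the MINI_VALUE default are illustrative names; GenerateStreamNetByTHR in Subbasin.py implements the same rules):

    def resolve_threshold(streamTHR, maxAcc, MINI_VALUE=1e-6):
        streamTHR = float(streamTHR)
        if streamTHR < MINI_VALUE:   # 0 (or effectively 0): default to 1% of maxAcc
            return maxAcc / 100.
        elif streamTHR <= 1:         # a fraction of the maximum accumulation
            return maxAcc * streamTHR
        return streamTHR             # already an absolute threshold

    print(resolve_threshold(0.01, 50000.))  # -> 500.0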
/RillPy/ShoulderLine.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | from Util import *
3 | from Hillslope import *
4 | from Subbasin import *
5 | import os,sys
6 |
7 | def IdentifyRillShoulderPts(Aspect,Slope,ProfC,alpha,beta,ShoulderPts):
8 | aspect = ReadRaster(Aspect).data
9 | nrows,ncols = aspect.shape
10 | nodata = ReadRaster(Aspect).noDataValue
11 | geotrans = ReadRaster(Aspect).geotrans
12 | slope = ReadRaster(Slope).data
13 | profc = ReadRaster(ProfC).data
14 | ShoulderPtsMtx = numpy.ones((nrows,ncols))
15 | if nodata != -9999:
16 | ShoulderPtsMtx = ShoulderPtsMtx * -9999
17 | else:
18 | ShoulderPtsMtx = ShoulderPtsMtx * nodata
19 | # A cell is a candidate shoulder point when it is less steep than its downslope neighbor, that neighbor is steeper than alpha, the downslope-minus-upslope slope difference exceeds beta, and profc < 0 (upwardly convex in the ArcGIS convention).
20 | for i in range(nrows):
21 | for j in range(ncols):
22 | # North
23 | if (aspect[i][j] >= 0 and aspect[i][j] < 22.5) or (aspect[i][j] >= 337.5 and aspect[i][j] < 360):
24 | if not(i-1 < 0 or i+1 >= nrows):
25 | if (slope[i][j]<slope[i-1][j]) and slope[i-1][j]>alpha and (slope[i-1][j]-slope[i+1][j] > beta) and profc[i][j]<0:
26 | ShoulderPtsMtx[i][j] = 1
27 | continue
28 | # Northeast
29 | if (aspect[i][j] >= 22.5 and aspect[i][j] < 67.5):
30 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols):
31 | if (slope[i][j]<slope[i-1][j+1]) and slope[i-1][j+1]>alpha and (slope[i-1][j+1]-slope[i+1][j-1] > beta) and profc[i][j]<0:
32 | ShoulderPtsMtx[i][j] = 1
33 | continue
34 | # East
35 | if (aspect[i][j] >= 67.5 and aspect[i][j] < 112.5):
36 | if not(j-1 < 0 or j+1 >= ncols):
37 | if (slope[i][j]<slope[i][j+1]) and slope[i][j+1]>alpha and (slope[i][j+1]-slope[i][j-1] > beta) and profc[i][j]<0:
38 | ShoulderPtsMtx[i][j] = 1
39 | continue
40 | # Southeast
41 | if (aspect[i][j] >= 112.5 and aspect[i][j] < 157.5):
42 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols):
43 | if (slope[i][j]<slope[i+1][j+1]) and slope[i+1][j+1]>alpha and (slope[i+1][j+1]-slope[i-1][j-1] > beta) and profc[i][j]<0:
44 | ShoulderPtsMtx[i][j] = 1
45 | continue
46 | # South
47 | if (aspect[i][j] >= 157.5 and aspect[i][j] < 202.5):
48 | if not(i-1 < 0 or i+1 >= nrows):
49 | if (slope[i][j]<slope[i+1][j]) and slope[i+1][j]>alpha and (slope[i+1][j]-slope[i-1][j] > beta) and profc[i][j]<0:
50 | ShoulderPtsMtx[i][j] = 1
51 | continue
52 | # Southwest
53 | if (aspect[i][j] >= 202.5 and aspect[i][j] < 247.5):
54 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols):
55 | if (slope[i][j]<slope[i+1][j-1]) and slope[i+1][j-1]>alpha and (slope[i+1][j-1]-slope[i-1][j+1] > beta) and profc[i][j]<0:
56 | ShoulderPtsMtx[i][j] = 1
57 | continue
58 | # West
59 | if (aspect[i][j] >= 247.5 and aspect[i][j] < 292.5):
60 | if not(j-1 < 0 or j+1 >= ncols):
61 | if (slope[i][j]<slope[i][j-1]) and slope[i][j-1]>alpha and (slope[i][j-1]-slope[i][j+1] > beta) and profc[i][j]<0:
62 | ShoulderPtsMtx[i][j] = 1
63 | continue
64 | # Northwest
65 | if (aspect[i][j] >= 292.5 and aspect[i][j] < 337.5):
66 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols):
67 | if (slope[i][j]<slope[i-1][j-1]) and slope[i-1][j-1]>alpha and (slope[i-1][j-1]-slope[i+1][j+1] > beta) and profc[i][j]<0:
68 | ShoulderPtsMtx[i][j] = 1
69 | continue
70 | WriteAscFile(ShoulderPts, ShoulderPtsMtx,ncols,nrows,geotrans,-9999)
71 |
72 | def RillShoulderSegement(Boundary,FlowDir,ShoulderPts,ShoulderFile):
73 | flowdir = ReadRaster(FlowDir).data
74 | flownodata = ReadRaster(FlowDir).noDataValue
75 | geotrans = ReadRaster(FlowDir).geotrans
76 | boundary = ReadRaster(Boundary).data
77 | shoulderpts = ReadRaster(ShoulderPts).data
78 | nrows,ncols = flowdir.shape
79 | nodata = ReadRaster(Boundary).noDataValue
80 | bndIdx = []
81 | for i in range(nrows):
82 | for j in range(ncols):
83 | if boundary[i][j] != nodata:
84 | #print i,j
85 | bndIdx.append((i,j))
86 | iterate = 0
87 | changed = 1
88 | while not(changed == 0 or iterate > 150):
89 | print "iterate time:%s, changed num:%s, boundary num:%s" % (iterate,changed,len(bndIdx))
90 | changed = 0
91 | tempbndIdx = []
92 | for bnd in bndIdx:
93 | if shoulderpts[bnd[0]][bnd[1]] == 1:
94 | tempbndIdx.append((bnd[0],bnd[1]))
95 | else:
96 | row,col = downstream_index(flowdir[bnd[0]][bnd[1]], bnd[0],bnd[1])
97 | if row < 0 or row >= nrows or col < 0 or col >= ncols:
98 | tempbndIdx.append((bnd[0],bnd[1]))
99 | else:
100 | tempbndIdx.append((row,col))
101 | changed = changed + 1
102 | tempbndIdx = list(set(tempbndIdx))
103 | bndIdx = tempbndIdx
104 | iterate = iterate + 1
105 | shoulder = numpy.ones((nrows,ncols))
106 | shoulder = shoulder * nodata
107 | for sd in bndIdx:
108 | shoulder[sd[0]][sd[1]] = 1
109 | WriteAscFile(ShoulderFile, shoulder,ncols,nrows,geotrans,nodata)
110 |
111 | def RillShoulder(BasinFile,FlowDir,ShoulderPts,tempDir,ShoulderFile):
112 | UniqueBasinId = GetUniqueValues(BasinFile)
113 | print UniqueBasinId
114 | for BsnID in UniqueBasinId:
115 | tempBsnID = []
116 | tempBsnID.append(BsnID)
117 | BsnASC = tempDir + os.sep + "BsnID" + str(BsnID) + ".asc"
118 | ExtractBasinBoundary(BasinFile,tempBsnID,BsnASC)
119 | ShldASC = tempDir + os.sep + "Shld" + str(BsnID) + ".asc"
120 | RillShoulderSegement(BsnASC,FlowDir,ShoulderPts,ShldASC)
121 |
122 |
--------------------------------------------------------------------------------
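The eight aspect-sector branches in IdentifyRillShoulderPts differ only in which neighbor counts as downslope. A table-driven sketch of the same test, assuming the grids are NumPy arrays as returned by ReadRaster (SECTORS and is_shoulder are illustrative names, not part of RillPy):

    # each aspect sector maps to the (row, col) offset of the downslope neighbor;
    # the opposite offset is the upslope neighbor
    SECTORS = [
        ((0.0, 22.5), (-1, 0)),     # North
        ((337.5, 360.0), (-1, 0)),  # North (wrap-around)
        ((22.5, 67.5), (-1, 1)),    # Northeast
        ((67.5, 112.5), (0, 1)),    # East
        ((112.5, 157.5), (1, 1)),   # Southeast
        ((157.5, 202.5), (1, 0)),   # South
        ((202.5, 247.5), (1, -1)),  # Southwest
        ((247.5, 292.5), (0, -1)),  # West
        ((292.5, 337.5), (-1, -1)), # Northwest
    ]

    def is_shoulder(i, j, aspect, slope, profc, alpha, beta):
        nrows, ncols = slope.shape
        a = aspect[i][j]
        for (lo, hi), (di, dj) in SECTORS:
            if lo <= a < hi:
                ni, nj = i + di, j + dj  # downslope neighbor
                ui, uj = i - di, j - dj  # upslope neighbor (opposite offset)
                if 0 <= ni < nrows and 0 <= nj < ncols and 0 <= ui < nrows and 0 <= uj < ncols:
                    return (slope[i][j] < slope[ni][nj] and slope[ni][nj] > alpha
                            and slope[ni][nj] - slope[ui][uj] > beta and profc[i][j] < 0)
                return False
        return False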
/RillPy/Subbasin.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import os,numpy
3 | import arcpy
4 | from arcpy import env
5 |
6 | from Util import *
7 |
8 | def GenerateStreamNetByTHR(DEMbuf,FlowDirFile,FlowAccFile,threshold,folder):
9 | print "Generating initial stream network according to threshold of flow accumulation..."
10 | env.workspace = folder
11 | arcpy.gp.overwriteOutput = 1
12 | arcpy.CheckOutExtension("Spatial")
13 | threshold = float(threshold)
14 | maxAcc = float(str(arcpy.GetRasterProperties_management(FlowAccFile,"MAXIMUM")))
15 | if threshold < MINI_VALUE:
16 | threshold = maxAcc / 100
17 | elif threshold >= MINI_VALUE and threshold <= 1:
18 | threshold = maxAcc * threshold
19 | # threshold > 1 is used as the absolute threshold directly
20 | 
21 | Exec = "Con(\"%s\" > %s,1)" % (FlowAccFile, threshold)
22 | arcpy.gp.RasterCalculator_sa(Exec, "streamnet")
23 | Stream_shp = "streamnet.shp"
24 | arcpy.sa.StreamToFeature("streamnet",FlowDirFile,Stream_shp,"NO_SIMPLIFY")
25 | StreamLinks = arcpy.sa.StreamLink("streamnet",FlowDirFile)
26 | StreamLinks.save("streamlinks")
27 | StreamLinks_shp = "streamlinks.shp"  # was "streamnet.shp", which would overwrite the stream shapefile above
28 | arcpy.sa.StreamToFeature("streamlinks",FlowDirFile,StreamLinks_shp,"NO_SIMPLIFY")
29 | StreamOrder = arcpy.sa.StreamOrder("streamnet",FlowDirFile,"STRAHLER")
30 | StreamOrder.save("streamorder")
31 | StreamOrderFile = folder + os.sep + "StreamOrder.shp"
32 | arcpy.sa.StreamToFeature("streamorder",FlowDirFile,StreamOrderFile,"NO_SIMPLIFY")
33 | Watershed = arcpy.sa.Watershed(FlowDirFile,"streamlinks","VALUE")
34 | Watershed.save("watershed")
35 | arcpy.RasterToPolygon_conversion("watershed","Watershed.shp","NO_SIMPLIFY","VALUE")
36 | WatershedFile = folder + os.sep + "watershed"
37 | StreamFile = folder + os.sep + "streamlinks"
38 | return (StreamFile,StreamOrderFile,WatershedFile)
39 | def RillIndexCalc(StreamOrderFile,DEMbuf,tempDir,StatsDir):
40 | print "Calculating rill indexes..."
41 | #input StreamOrderFile and DEMbuf,output CSV files.
42 | env.workspace = tempDir
43 | arcpy.gp.overwriteOutput = 1
44 | arcpy.CheckOutExtension("Spatial")
45 | dem_des = arcpy.gp.describe(DEMbuf)
46 | env.extent = dem_des.Extent
47 | arcpy.FeatureVerticesToPoints_management(StreamOrderFile,"StreamNDsStart.shp","START")
48 | arcpy.FeatureVerticesToPoints_management(StreamOrderFile,"StreamNDsEnd.shp","END")
49 | arcpy.AddXY_management("StreamNDsStart.shp")
50 | arcpy.AddXY_management("StreamNDsEnd.shp")
51 | arcpy.sa.ExtractValuesToPoints("StreamNDsStart.shp",DEMbuf,"StreamNDsElevStart.shp","NONE", "VALUE_ONLY")
52 | arcpy.sa.ExtractValuesToPoints("StreamNDsEnd.shp",DEMbuf,"StreamNDsElevEnd.shp","NONE", "VALUE_ONLY")
53 |
54 | def GenerateWatershedByStream(StreamFile,FlowDirFile, tempDir, WatershedFile):
55 | print "Regenerating watershed by real rill network..."
56 | arcpy.CheckOutExtension("spatial")
57 | arcpy.gp.overwriteOutput = 1
58 |
59 | tempStream = tempDir + os.sep + "StmNet"
60 | arcpy.ASCIIToRaster_conversion(StreamFile, tempStream,"INTEGER")
61 | Watershed = arcpy.sa.Watershed(FlowDirFile,tempStream,"VALUE")
62 | tempWtshd = tempDir + os.sep + "WtShd"
63 | Watershed.save(tempWtshd)
64 | GRID2ASC(tempWtshd,WatershedFile)
65 |
66 | def isEdge(raster,row,col,nodata):
67 | nrows,ncols = raster.shape
68 | if (row == 0 or row == nrows-1 or col == 0 or col == ncols-1) and raster[row][col] != nodata:
69 | return True
70 | elif raster[row][col] == nodata:
71 | return False
72 | else:
73 | count = 0
74 | for di in [-1,0,1]:
75 | for dj in [-1,0,1]:
76 | ni = row + di
77 | nj = col + dj
78 | if raster[ni][nj] == nodata:
79 | count = count + 1
80 | if count > 0:
81 | return True
82 | else:
83 | return False
84 |
85 | def ExtractBasinBoundary(Basin,basinID,BasinBoundary):
86 | basin = ReadRaster(Basin).data
87 | nodata = ReadRaster(Basin).noDataValue
88 | #print nodata
89 | geotrans = ReadRaster(Basin).geotrans
90 | nrows,ncols = basin.shape
91 | Boundary = numpy.ones((nrows,ncols))
92 | if nodata != -9999:
93 | Boundary = Boundary * -9999
94 | else:
95 | Boundary = Boundary * nodata
96 |
97 | for i in range(nrows):
98 | for j in range(ncols):
99 | if basin[i][j] in basinID:
100 | #count = count + 1
101 | basin[i][j] = 1
102 | else:
103 | basin[i][j] = nodata
104 | for i in range(nrows):
105 | for j in range(ncols):
106 | if isEdge(basin,i,j,nodata):
107 | Boundary[i][j] = 1
108 | WriteAscFile(BasinBoundary, Boundary,ncols,nrows,geotrans,-9999)
109 |
--------------------------------------------------------------------------------
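ExtractBasinBoundary and isEdge above scan the grid cell by cell. A vectorized sketch of the same boundary test with NumPy (basin_boundary is an illustrative name; numpy.isin requires NumPy 1.13 or later, and the input is assumed to be the 2-D array from ReadRaster):

    import numpy

    def basin_boundary(basin, basin_ids, nodata=-9999):
        # cells belonging to the selected basins
        inside = numpy.isin(basin, basin_ids)
        # pad with False so cells on the grid edge count as touching 'outside'
        padded = numpy.pad(inside, 1, mode='constant', constant_values=False)
        neighbor_outside = numpy.zeros(inside.shape, dtype=bool)
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if di == 0 and dj == 0:
                    continue
                shifted = padded[1 + di:1 + di + inside.shape[0],
                                 1 + dj:1 + dj + inside.shape[1]]
                neighbor_outside |= ~shifted
        boundary = numpy.full(basin.shape, nodata, dtype=float)
        boundary[inside & neighbor_outside] = 1  # basin cells with a non-basin neighbor
        return boundary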
/RillPy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/RillPy/__init__.py
--------------------------------------------------------------------------------
/RillPy/main.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """
3 | @Created : 2015-1-6
4 | @Revised : 2015-1-28 Divided into separate files for better version control
5 |
6 | @author : Liangjun Zhu
7 | @summary : Delineating and Extracting hillslopes and real rill from DEM.
8 | @param : DEMsrc, rootdir, streamTHR
9 | @requires : ArcGIS 10.x, gdal, Scipy
10 | @references: Detail information will be found in README.txt.
11 | @contact : zlj@lreis.ac.cn
12 | """
13 | import os
14 | import Util
15 | import Subbasin
16 | import Hillslope
17 | import Rill
18 | import ShoulderLine
19 |
20 | if __name__ == '__main__':
21 | ## Input params
22 | DEMsrc = r'E:\MasterBNU\RillMorphology\test\testdem'
23 | rootdir = r'E:\MasterBNU\RillMorphology\20150130'
24 | streamTHR = 0.01
25 |
26 | ## Run algorithms
27 | tempDir,PreprocessDir,RillExtDir,StatsDir = Util.makeResultFolders(rootdir)
28 | # DEMbuf,DEMfil,SlopeFile,SOSFile,AspectFile,FlowDirFile,FlowAccFile,CurvProfFile,CurvPlanFile = Util.UtilHydroFiles(DEMsrc, PreprocessDir)
29 | # StreamFile,StreamOrderFile,WatershedFile = Subbasin.GenerateStreamNetByTHR(DEMbuf,FlowDirFile,FlowAccFile,streamTHR,tempDir)
30 | # Subbasin.RillIndexCalc(StreamOrderFile,DEMbuf,tempDir,StatsDir)
31 |
32 | HillslpFile = RillExtDir + os.sep + "HillSlp.asc"
33 | #Hillslope.DelineateHillslopes(StreamFile,FlowDirFile,HillslpFile)
34 |
35 | DEMfil = PreprocessDir + os.sep + "DEMfil"
36 | StreamFile = tempDir + os.sep + "StreamLinks"
37 | WatershedFile = tempDir + os.sep + "watershed"
38 | AspectFile = PreprocessDir + os.sep + "aspect"
39 | SlopeFile = PreprocessDir + os.sep + "slope"
40 | SOSFile = PreprocessDir + os.sep + "sos"
41 | CurvProfFile = PreprocessDir + os.sep + "curvprof"
42 | FlowDirFile = PreprocessDir + os.sep + "flowdir"
43 | FlowAccFile = PreprocessDir + os.sep + "flowacc"
44 | UpStreamRouteFile = RillExtDir + os.sep + "UpstreamRoute.txt"
45 | UpStreamRouteShp = RillExtDir + os.sep + "UpstreamRoute.shp"
46 | ShoulderptsFile = RillExtDir + os.sep + "Shoulderpts.asc"
47 | RealrillFile1 = RillExtDir + os.sep + "Realrill1.asc"
48 | RealrillFile2 = RillExtDir + os.sep + "Realrill2.asc"
49 | RillEdgeFile = RillExtDir + os.sep + "RealEdge.asc"
50 | RealRillFinal = RillExtDir + os.sep + "RealRill.asc"
51 | RillStFile = RillExtDir + os.sep + "RealRillFinal.asc"
52 | OrderStFile = RillExtDir + os.sep + "RillOrderFinal.asc"
53 | FinalWtdFile = RillExtDir + os.sep + "WatershedFinal.asc"
54 | HillslpFinalFile = RillExtDir + os.sep + "HillslpFinal.asc"
55 | UpStreamRouteFinalFile = RillExtDir + os.sep + "UpstreamRouteFinal.txt"
56 | UpStreamRouteFinalShp = RillExtDir + os.sep + "UpstreamRouteFinal.shp"
57 | ShoulderptsFinalFile = RillExtDir + os.sep + "ShoulderptsFinal.asc"
58 | RealrillFile1Final = RillExtDir + os.sep + "Realrill1final.asc"
59 | #Rill.UpStreamRoute(DEMfil,WatershedFile,HillslpFile,StreamFile,FlowDirFile,RillExtDir,UpStreamRouteFile,UpStreamRouteShp)
60 | #Rill.Shoulderpts(UpStreamRouteFile,DEMfil,SlopeFile,SOSFile,RillExtDir,ShoulderptsFile,RealrillFile1)
61 | #Rill.IdentifyRillRidges(HillslpFile,StreamFile,FlowDirFile,FlowAccFile,WatershedFile,DEMfil,RealrillFile2,RillEdgeFile)
62 | #Rill.RelinkRealRill(RealrillFile1,RealrillFile2,StreamFile,FlowDirFile,RealRillFinal)
63 | #Rill.SimplifyByRillOrder(RealRillFinal,FlowDirFile,tempDir,5,RillStFile,OrderStFile)
64 | #Subbasin.GenerateWatershedByStream(RillStFile,FlowDirFile, tempDir, FinalWtdFile)
65 | #Hillslope.DelineateHillslopes(RillStFile,FlowDirFile,HillslpFinalFile)
66 | #Rill.UpStreamRoute(DEMfil,FinalWtdFile,HillslpFinalFile,RillStFile,FlowDirFile,RillExtDir,UpStreamRouteFinalFile,UpStreamRouteFinalShp)
67 | Rill.Shoulderpts(UpStreamRouteFinalFile,DEMfil,SlopeFile,SOSFile,RillExtDir,ShoulderptsFinalFile,RealrillFile1Final)
68 |
69 |
70 | #alpha = 25
71 | #beta = 5
72 | #ShoulderPtsOrig = RillExtDir + os.sep + "ShoulderPtsOrig.asc"
73 | #ShoulderLine.IdentifyRillShoulderPts(AspectFile,SlopeFile,CurvProfFile,alpha,beta,ShoulderPtsOrig)
74 | #num = 50
75 | #ShoulderPts = RillExtDir + os.sep + "ShoulderPts.asc"
76 | #Util.RemoveLessPts(ShoulderPtsOrig,num,ShoulderPts)
77 | #Basin = PreprocessDir + os.sep + "basin"
78 | #Watershed = tempDir + os.sep + "watershed"
79 | #basinID = [1,4,25,26]
80 | #BasinBoundary = PreprocessDir + os.sep + "basinBounday.asc"
81 | #Subbasin.ExtractBasinBoundary(Basin,basinID,BasinBoundary)
82 | #Shoulder = RillExtDir + os.sep + "Shoulder.asc"
83 | #ShoulderLine.RillShoulderSegement(BasinBoundary,FlowDirFile,ShoulderPts,Shoulder)
84 | #ShoulderLine.RillShoulder(Watershed,FlowDirFile,ShoulderPts,tempDir,Shoulder)
--------------------------------------------------------------------------------
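main.py hard-codes DEMsrc, rootdir, and streamTHR at the top of the script. A minimal sketch, not part of RillPy, of exposing the same three parameters on the command line (the flag names are illustrative):

    import argparse

    def parse_args():
        p = argparse.ArgumentParser(description='RillPy: rill extraction from DEM')
        p.add_argument('--dem', dest='DEMsrc', required=True,
                       help='source DEM in which rill erosion occurs')
        p.add_argument('--out', dest='rootdir', default='',
                       help='results folder; "" means ./RillPyResults')
        p.add_argument('--thr', dest='streamTHR', type=float, default=0.,
                       help='0 = 1%% of max accumulation, (0,1] = fraction of it, >1 = absolute')
        return p.parse_args()

    if __name__ == '__main__':
        args = parse_args()
        print('%s %s %s' % (args.DEMsrc, args.rootdir, args.streamTHR))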
/SWAT_post_process/Read_SWAT_Output_MDB.py:
--------------------------------------------------------------------------------
1 | import pyodbc
2 | import sys, os
3 |
4 |
5 | def readTable(mdbfile, tableName, findField, findValue, csvfile):
6 | odbc_conn_str = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;UID=;PWD=;' % mdbfile
7 | # print odbc_conn_str
8 | conn = pyodbc.connect(odbc_conn_str)
9 | cursor = conn.cursor()
10 | field_sel = ["YEAR", "MON", "FLOW_OUTcms", "SED_OUTtons", "NO3_OUTkg", "NH4_OUTkg", "NO2_OUTkg",
11 | "TOT_Nkg", "TOT_Pkg", "MINP_OUTkg", "ORGP_OUTkg"]
12 | field_sel_idx = []
13 | fields = []
14 | fields_str = ''
15 | for row in cursor.columns(table=tableName):
16 | fields.append(row.column_name)
17 | for field in field_sel:
18 | if field in fields:
19 | field_sel_idx.append(fields.index(field))
20 | fields_str += field
21 | fields_str += ','
22 | # print fields_str
23 | query = "SELECT * FROM %s WHERE %s=%s" % (tableName, findField, findValue)
24 | # print query
25 | cursor.execute(query)
26 | rows = cursor.fetchall()
27 | f = open(csvfile, 'w')  # the csvfile parameter, not the module-level csv_file
28 | f.write(fields_str)
29 | f.write('\n')
30 | for row in rows:
31 | row_str = ''
32 | for i in field_sel_idx:
33 | row_str += str(row[i])
34 | row_str += ","
35 | # print row
36 | # print row_str
37 | f.write(row_str)
38 | f.write('\n')
39 | f.close()
40 |
41 |
42 | def currentPath():
43 | path = sys.path[0]
44 | if os.path.isdir(path):
45 | return path
46 | elif os.path.isfile(path):
47 | return os.path.dirname(path)
48 |
49 |
50 | if __name__ == '__main__':
51 | path = currentPath()
52 | # SWAT_output_mdb_file = r'E:\data_m\QSWAT_projects\ZhongTianShe2\zts2\Scenarios\sim8\TablesOut\SWATOutput.mdb'
53 | # csv_file = r'E:\data_m\QSWAT_projects\ZhongTianShe2\zts2\Scenarios\sim8\TablesOut\rch.csv'
54 | SWAT_output_mdb_file = path + os.sep + "SWATOutput.mdb"
55 | # csv_file = path + os.sep + "rch.csv"
56 | # readTable(SWAT_output_mdb_file, "rch", "SUB", 11, csv_file)
57 | ## the following code can export all reaches
58 | subbsnNum = 15
59 | for i in range(1, subbsnNum + 1):
60 | csv_file = path + os.sep + "rch%s.csv" % str(i)
61 | readTable(SWAT_output_mdb_file, "rch", "SUB", i, csv_file)
62 |
--------------------------------------------------------------------------------
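readTable above interpolates findValue straight into the SQL string. A sketch of the same query with pyodbc parameter binding (read_rows is an illustrative name; table and field names still have to be interpolated because only values can be bound):

    import pyodbc

    def read_rows(mdbfile, table, field, value):
        conn_str = ('DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
                    'DBQ=%s;UID=;PWD=;' % mdbfile)
        conn = pyodbc.connect(conn_str)
        cursor = conn.cursor()
        # the value is bound via '?' instead of being formatted into the SQL text
        cursor.execute('SELECT * FROM %s WHERE %s = ?' % (table, field), value)
        rows = cursor.fetchall()
        conn.close()
        return rows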
/SWAT_post_process/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/SWAT_post_process/__init__.py
--------------------------------------------------------------------------------
/SWAT_post_process/stats_SWAT_Output_mdb.py:
--------------------------------------------------------------------------------
1 | import pyodbc
2 | import sys, os
3 | import numpy
4 |
5 |
6 | def statsOutput(mdbfile, tableName, findField, findValue, years, fieldSel, csvfile):
7 | odbc_conn_str = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;UID=;PWD=;' % mdbfile
8 | # print odbc_conn_str
9 | conn = pyodbc.connect(odbc_conn_str)
10 | cursor = conn.cursor()
11 | field_sel_idx = []
12 | fields = []
13 | fields_str = ''
14 | for row in cursor.columns(table=tableName):
15 | fields.append(row.column_name)
16 | for field in fieldSel:
17 | if field in fields:
18 | field_sel_idx.append(fields.index(field))
19 | fields_str += field
20 | fields_str += ','
21 | # print fields_str
22 | query = "SELECT * FROM %s WHERE %s=%s" % (tableName, findField, findValue)
23 | # print query
24 | cursor.execute(query)
25 | rows = cursor.fetchall()
26 | f = open(csvfile, 'w')  # the csvfile parameter, not the module-level csv_file
27 | f.write(fields_str)
28 | f.write('\n')
29 | for row in rows:
30 | row_str = ''
31 | for i in field_sel_idx:
32 | row_str += str(row[i])
33 | row_str += ","
34 | # print row
35 | # print row_str
36 | f.write(row_str)
37 | f.write('\n')
38 | f.close()
39 |
40 |
41 | def currentPath():
42 | path = sys.path[0]
43 | if os.path.isdir(path):
44 | return path
45 | elif os.path.isfile(path):
46 | return os.path.dirname(path)
47 |
48 |
49 | if __name__ == '__main__':
50 | SWAT_output_mdb_file = r'E:\data_m\QSWAT_projects\Done\baseSim_unCali\baseSim_unCali\Scenarios\Default\TablesOut\SWATOutput.mdb'
51 | csv_file = r'E:\data_m\QSWAT_projects\Done\baseSim_unCali\baseSim_unCali\Scenarios\Default\TablesOut\rch_stats.csv'
52 | # path = currentPath()
53 | # SWAT_output_mdb_file = path + os.sep + "SWATOutput.mdb"
54 | # csv_file = path + os.sep + "rch_stats.csv"
55 |
56 | field_sel = ["FLOW_OUTcms", "SED_OUTtons", "NO3_OUTkg", "NH4_OUTkg", "NO2_OUTkg", "TOT_Nkg",
57 | "TOT_Pkg", "MINP_OUTkg", "ORGP_OUTkg"]
58 | year_sel = [2014]
59 | subbsnNum = 15
60 | statsOutput(SWAT_output_mdb_file, 'rch', 'SUB', subbsnNum, year_sel, field_sel, csv_file)
61 |
--------------------------------------------------------------------------------
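statsOutput imports numpy and accepts a years argument, but as written it only dumps the selected columns. A sketch, under the assumption that the rch table has a YEAR column, of the per-year aggregation the signature suggests (yearly_means and its arguments are illustrative):

    import numpy

    def yearly_means(rows, fields, field_idx, year_idx, years):
        # keep only the rows whose YEAR column value is in 'years'
        selected = [row for row in rows if int(row[year_idx]) in years]
        stats = {}
        for name, idx in zip(fields, field_idx):
            values = numpy.array([float(r[idx]) for r in selected])
            stats[name] = values.mean() if values.size else float('nan')
        return stats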
/SWATplusUtility/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/SWATplusUtility/__init__.py
--------------------------------------------------------------------------------
/TIN_Hydro/User manual-zhulj-2016-2-20.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/User manual-zhulj-2016-2-20.docx
--------------------------------------------------------------------------------
/TIN_Hydro/XYZ2ShpPoint_GDAL.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | ## @Generate ESRI Shapefile from XYZ point text file.
3 | ## @author: Liang-Jun Zhu
4 | ## @Date: 2016-6-17
5 | ## @Email: zlj@lreis.ac.cn
6 | #
7 | import os,sys,time
8 | from osgeo import ogr
9 | def currentPath():
10 | path = sys.path[0]
11 | if os.path.isdir(path):
12 | return path
13 | elif os.path.isfile(path):
14 | return os.path.dirname(path)
15 | def WritePointShp(vertexList,zFieldName,outShp):
16 | print "Write point shapefile: %s" % outShp
17 | driver = ogr.GetDriverByName("ESRI Shapefile")
18 | if driver is None:
19 | print "ESRI Shapefile driver not available."
20 | sys.exit(1)
21 | if os.path.exists(outShp):
22 | driver.DeleteDataSource(outShp)
23 | ds = driver.CreateDataSource(outShp.rpartition(os.sep)[0])
24 | if ds is None:
25 | print "ERROR Output: Creation of output file failed."
26 | sys.exit(1)
27 | lyr = ds.CreateLayer(outShp.rpartition(os.sep)[2].split('.')[0],None,ogr.wkbPoint)
28 | zField = ogr.FieldDefn(zFieldName,ogr.OFTReal)
29 | lyr.CreateField(zField)
30 |
31 | #vertexGeo = ogr.Geometry(ogr.wkbMultiPoint)
32 | for vertex in vertexList:
33 | vertexGeo = ogr.Geometry(ogr.wkbPoint)
34 | vertexGeo.AddPoint(vertex[0],vertex[1])
35 | featureDefn = lyr.GetLayerDefn()
36 | vertexFeature = ogr.Feature(featureDefn)
37 | vertexFeature.SetGeometry(vertexGeo)
38 | vertexFeature.SetField(zFieldName, vertex[2])
39 | lyr.CreateFeature(vertexFeature)
40 | vertexFeature.Destroy()
41 | ds.Destroy()
42 | def progress(percent):
43 | bar_length=20
44 | hashes = '#' * int(percent/100.0 * bar_length)
45 | spaces = ' ' * (bar_length - len(hashes))
46 | sys.stdout.write(" Handling: [%s] %.1f%%\n"%(hashes + spaces, percent))
47 | sys.stdout.flush()
48 | #time.sleep(1)
49 |
50 | def GeneratorPointShp(txtFile,outShp):
51 | start = time.time()
52 | lineCount = 0
53 | thefile = open(txtFile,'rb')
54 | while True:
55 | buffer = thefile.read(1024 * 8192)
56 | if not buffer:
57 | break
58 | lineCount += buffer.count('\n')
59 | thefile.close()
60 | print "There are %d points to be processed." % lineCount
61 |
62 | ## Create shapefile
63 | driver = ogr.GetDriverByName("ESRI Shapefile")
64 | if driver is None:
65 | print "ESRI Shapefile driver not available."
66 | sys.exit(1)
67 | if os.path.exists(outShp):
68 | driver.DeleteDataSource(outShp)
69 | ds = driver.CreateDataSource(outShp.rpartition(os.sep)[0])
70 | if ds is None:
71 | print "ERROR Output: Creation of output file failed."
72 | sys.exit(1)
73 | lyr = ds.CreateLayer(outShp.rpartition(os.sep)[2].split('.')[0],None,ogr.wkbPoint)
74 | xField = ogr.FieldDefn("X",ogr.OFTReal)
75 | yField = ogr.FieldDefn("Y",ogr.OFTReal)
76 | zField = ogr.FieldDefn("Z",ogr.OFTReal)
77 | lyr.CreateField(xField)
78 | lyr.CreateField(yField)
79 | lyr.CreateField(zField)
80 |
81 | count = 0
82 | with open(txtFile) as f:
83 | for line in f:
84 | pts = line.split(',')
85 | if len(pts) == 4:  # expects four comma-separated values per line; the first is skipped
86 | x = float(pts[1])
87 | y = float(pts[2])
88 | z = float(pts[3])
89 | #print x,y,z
90 |
91 | vertexGeo = ogr.Geometry(ogr.wkbPoint)
92 | vertexGeo.AddPoint(x,y)
93 | featureDefn = lyr.GetLayerDefn()
94 | vertexFeature = ogr.Feature(featureDefn)
95 | vertexFeature.SetGeometry(vertexGeo)
96 | vertexFeature.SetField("X", x)
97 | vertexFeature.SetField("Y", y)
98 | vertexFeature.SetField("Z", z)
99 | lyr.CreateFeature(vertexFeature)
100 | vertexFeature.Destroy()
101 |
102 | count += 1
103 | # report progress every ~5% (integer step; the float-modulo test perc%5.==0. almost never fires)
104 | if lineCount > 0 and count % max(lineCount // 20, 1) == 0:
105 | progress(float(count) / lineCount * 100)
106 | ds.Destroy()
107 | end = time.time()
108 | secs = end - start
109 | mins = secs / 60.
110 | print "\nAll done, costs %.1f minutes!" % mins
111 | if __name__ == '__main__':
112 | # currFolder = currentPath()
113 | currFolder = r'e:/test/test'  # hard-coded test folder overrides currentPath()
114 | filename = "xyz.txt"
115 | outfilename = "test.shp"
116 |
117 | xyzTxtFile = currFolder + os.sep + filename
118 | shpFile = currFolder + os.sep + outfilename
119 | GeneratorPointShp(xyzTxtFile, shpFile)
--------------------------------------------------------------------------------
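Both layers in XYZ2ShpPoint_GDAL.py are created with srs=None, so the output shapefiles carry no .prj file. A sketch of attaching a spatial reference when the coordinate system is known (the EPSG code and output path here are placeholders):

    from osgeo import ogr, osr

    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)  # placeholder EPSG code; use the dataset's actual one

    driver = ogr.GetDriverByName('ESRI Shapefile')
    ds = driver.CreateDataSource(r'e:/test/pts_with_srs.shp')  # placeholder path
    lyr = ds.CreateLayer('pts_with_srs', srs, ogr.wkbPoint)    # srs instead of None
    lyr.CreateField(ogr.FieldDefn('Z', ogr.OFTReal))
    ds.Destroy()  # flushes .shp/.shx/.dbf and writes the .prj file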
/TIN_Hydro/_project:
--------------------------------------------------------------------------------
1 | [default]
2 | projectname = python,
3 |
4 |
--------------------------------------------------------------------------------
/TIN_Hydro/backup/CGAL-test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | from CGAL.Triangulations_2 import *
3 | from CGAL.Triangulations_3 import *
4 | from CGAL.Kernel import Point_3
5 | from CGAL.Kernel import Point_2
6 | from osgeo import ogr
7 | from gdalconst import *
8 | import os,sys
9 | from ShapefileIO import *
10 |
11 | pts2DList = [[1.1,2.1],[0.8,5],[5.2,1.9],[3.5,4.9],[6,7.4],[0.3,8],[-2,5.0]]
12 | print pts2DList
13 | dt = Delaunay_triangulation_2()
14 |
15 | for pt in pts2DList:
16 | dt.insert(Point_2(pt[0],pt[1]))
17 | #print "number of tin: %d" % dt.number_of_faces()
18 | TriangleList = []
19 | TriangleVertexList = []
20 | TriangleNbrIdxList = []
21 | VertexList = []
22 | TriangleVertexListASC = []
23 | #for v in dt.vertices:
24 | # VertexList.append([v.point()[0],v.point()[1]])
25 | #print VertexList
26 | for f in dt.faces:
27 | temppts = []
28 | tempPtsIdx = []
29 | for i in range(3):
30 | tempp = f.vertex(i).point()
31 | temppts.append([tempp[0],tempp[1]])
32 | tempPtsIdx.append(pts2DList.index([tempp[0],tempp[1]]))
33 | TriangleList.append(temppts)
34 | TriangleVertexList.append(tempPtsIdx)
35 | TriangleVertexListASC.append(sorted(tempPtsIdx))
36 | print TriangleVertexList
37 | for f in dt.faces:
38 | NbrFaceIdx = []
39 | for i in range(3):
40 | tempFaceIdx = []
41 | if dt.is_infinite(f.neighbor(i)) == False:
42 | for j in range(3):
43 | tempFaceIdx.append(pts2DList.index([f.neighbor(i).vertex(j).point()[0],f.neighbor(i).vertex(j).point()[1]]))
44 | NbrFaceIdx.append(TriangleVertexList.index(tempFaceIdx))
45 | else:
46 | NbrFaceIdx.append(None)
47 | value = NbrFaceIdx.pop(2)
48 | NbrFaceIdx.insert(0,value)
49 | TriangleNbrIdxList.append(NbrFaceIdx)
50 | print TriangleNbrIdxList
51 | #
52 | #Shp = r'E:\research\TIN-based\testtin.shp'
53 | ##WritePolyonShp(TriangleList,Shp)
54 | VertexTriangleList = []
55 | for v in dt.vertices:
56 | #print v.point()
57 | cir_faces = dt.incident_faces(v)
58 | finites_faces = []
59 | f1 = cir_faces.next()
60 | if dt.is_infinite(f1) == False:
61 | finites_faces.append(f1)
62 | for f in cir_faces:
63 | if f == f1:
64 | break
65 | else:
66 | if dt.is_infinite(f) == False:
67 | finites_faces.append(f)
68 | finites_faces_idx = []
69 | for f in finites_faces:
70 | tempFaceIdx = []
71 | for i in range(3):
72 | tempFaceIdx.append(pts2DList.index([f.vertex(i).point()[0],f.vertex(i).point()[1]]))
73 | finites_faces_idx.append(TriangleVertexList.index(tempFaceIdx))
74 | VertexTriangleList.append(finites_faces_idx)
75 | print VertexTriangleList
76 |
77 |
78 | #for f in dt.faces:
79 | # tempNeighborIdx = []
80 | # for i in range(3):
81 | # neighborFace = f.neighbor(i)
82 | # for j in range(3):
83 | # neighborFace.vertex(j).Point
84 | # neighborFace
85 | # tempNeighborIdx.append()
86 | #
87 | # print TriangleVertexList.index(tempNeighborIdx)
88 |
89 |
90 | #ptsShp = r'E:\research\TIN-based\Points_Elev.shp'
91 | #elevField = "ELEV"
92 | #tinShp = r'E:\research\TIN-based\tin.shp'
93 | #tin3DShp = r'E:\research\TIN-based\tin3D.shp'
94 | #if not ptsShp.endswith(".shp"):
95 | # print "Error Input: Please input an shapefile!"
96 | # sys.exit(1)
97 | #ptsData = ogr.Open(ptsShp)
98 | #pts3DList = []
99 | #pts2DList = []
100 | #dt = Delaunay_triangulation_2()
101 | #dt3 = Delaunay_triangulation_3()
102 | #if ptsData is None:
103 | # print "Error occurs when trying to open %s!" % ptsShp
104 | # sys.exit(1)
105 | #else:
106 | # lyr = ptsData.GetLayerByIndex(0)
107 | # if lyr.GetGeomType() != 1:
108 | # print "Error Input: Please input an point shapefile!"
109 | # sys.exit(1)
110 | # hasElev = False
111 | # for field in lyr.schema:
112 | # if field.GetName() == elevField:
113 | # hasElev = True
114 | # if not hasElev:
115 | # print "Error Input: No field matches %s" % elevField
116 | # sys.exit(1)
117 | # lyr.ResetReading()
118 | # for feat in lyr:
119 | # geom = feat.GetGeometryRef()
120 | # if geom is not None and geom.GetGeometryType() == ogr.wkbPoint:
121 | # x = geom.GetX()
122 | # y = geom.GetY()
123 | # z = float(feat.GetField(feat.GetFieldIndex(elevField)))
124 | # pts3DList.append(Point_3(x,y,z))
125 | # pts2DList.append(Point_2(x,y))
126 | #ptsData = None
127 | ##print len(ptsList)
128 | #for p in pts2DList:
129 | # dt.insert(p)
130 | #for p in pts3DList:
131 | # dt3.insert(p)
132 | #print "2D Triangulation Numbers: %d" % dt.number_of_faces()
133 | #print "3D Triangulation Numbers: %d" % dt3.number_of_facets()
134 | ## write shapefile
135 | #driver = ogr.GetDriverByName("ESRI Shapefile")
136 | #if driver is None:
137 | # print "ESRI Shapefile driver not available."
138 | # sys.exit(1)
139 | #if os.path.exists(tinShp):
140 | # driver.DeleteDataSource(tinShp)
141 | #ds = driver.CreateDataSource(tinShp.rpartition(os.sep)[0])
142 | #if ds is None:
143 | # print "ERROR Output: Creation of output file failed."
144 | # sys.exit(1)
145 | #lyr = ds.CreateLayer("tin",None,ogr.wkbPolygon)
146 | #
147 | #for f in dt.faces:
148 | # tempPts = []
149 | # tri = ogr.Geometry(ogr.wkbLinearRing)
150 | # for i in range(3):
151 | # tempp = f.vertex(i).point()
152 | # tri.AddPoint(tempp[0],tempp[1])
153 | # tempPts.append(tempp)
154 | # #print "x=%f,y=%f" % (tempp[0],tempp[1])
155 | # tri.AddPoint(tempPts[0][0],tempPts[0][1])
156 | # tinpoly = ogr.Geometry(ogr.wkbPolygon)
157 | # tinpoly.AddGeometry(tri)
158 | # tempTri = ogr.CreateGeometryFromJson(tinpoly.ExportToJson())
159 | # feature = ogr.Feature(lyr.GetLayerDefn())
160 | # feature.SetGeometry(tempTri)
161 | # lyr.CreateFeature(feature)
162 | # feature.Destroy()
163 | #ds.Destroy()
164 |
--------------------------------------------------------------------------------
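The CGAL-Python bindings used above date from Python 2.7-era installers (see TIN_Hydro/env). A sketch of recovering the same triangle-vertex and triangle-neighbor lists with scipy.spatial.Delaunay; note that neighbors[i][k] is the simplex opposite vertex k, so the neighbor ordering differs from the CGAL loop above:

    import numpy
    from scipy.spatial import Delaunay

    pts2DList = [[1.1, 2.1], [0.8, 5], [5.2, 1.9], [3.5, 4.9], [6, 7.4], [0.3, 8], [-2, 5.0]]
    tri = Delaunay(numpy.array(pts2DList))

    TriangleVertexList = tri.simplices.tolist()  # vertex indexes of each triangle
    # neighbors[i][k] is the triangle opposite vertex k of triangle i; -1 = hull edge
    TriangleNbrIdxList = [[None if n == -1 else int(n) for n in nbrs]
                          for nbrs in tri.neighbors]
    print(TriangleVertexList)
    print(TriangleNbrIdxList)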
/TIN_Hydro/backup/fit.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 |
3 | import scipy
4 | import numpy
5 | import xalglib
6 | if __name__ == '__main__':
7 | fit3Pt = [[719532.147, 21198.921, 3162.84], [719541.137, 21204.959, 3162.84],[719532.008, 21208.555, 3162.84]]
8 | fit3NbrPts = [[[719542.893, 21196.805, 3161.92], [719532.147, 21198.921, 3162.84], [719527.028, 21206.985, 3164.02], [719541.137, 21204.959, 3162.84], [719532.008, 21208.555, 3162.84], [719520.537, 21196.113, 3170.23], [719536.501, 21186.141, 3170.06]], [[719542.893, 21196.805, 3161.92], [719532.147, 21198.921, 3162.84], [719558.451, 21209.343, 3177.76], [719544.202, 21212.663, 3174.22], [719541.137, 21204.959, 3162.84], [719532.008, 21208.555, 3162.84]], [[719532.147, 21198.921, 3162.84], [719527.028, 21206.985, 3164.02], [719524.651, 21211.249, 3164.02], [719544.202, 21212.663, 3174.22], [719541.137, 21204.959, 3162.84], [719532.008, 21208.555, 3162.84], [719530.92, 21218.239, 3175.19]]]
9 |
10 |
--------------------------------------------------------------------------------
/TIN_Hydro/backup/test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import os,sys
3 | from ShapefileIO import *
4 | from TINcreator import *
5 |
6 | def findIntersectIdx(v,v0,A,B,C): ## v holds the three vertices of a triangle; v0 is a point on the steepest-descent vector
7 | ### the basic idea is OC = x*OA + y*OB; when x > 0 and y > 0, OC lies between OA and OB
8 | intersect = None ## first point's index of the intersected edge; stays None if no edge is intersected
9 | for i in range(3):
10 | ## OA, OB vector
11 | o1 = [v[i][0]-v0[0],v[i][1]-v0[1]]
12 | o2 = [v[(i+1)%3][0]-v0[0],v[(i+1)%3][1]-v0[1]]
13 | ## if OA and OB are collinear?
14 | k = o1[1]*o2[0]-o1[0]*o2[1]
15 | if k == 0:
16 | ## if deepest flow path vector (A/C, B/C) is also collinear?
17 | k1 = o1[1]*A/C - o1[0]*B/C
18 | k2 = o2[1]*A/C - o2[0]*B/C
19 | if k1==0 and k2==0:
20 | intersect = i
21 | else:
22 | m = (o2[0]*B/C-o2[1]*A/C) / k
23 | if o2[0] != 0:
24 | n = (A/C-m*o1[0])/o2[0]
25 | else:
26 | n = (B/C-m*o1[1])/o2[1]
27 | if m > 0 and n > 0:
28 | intersect = i
29 | return intersect
30 | def trianglePlane(p1,p2,p3):
31 | A = p1[1]*(p2[2]-p3[2])+p2[1]*(p3[2]-p1[2])+p3[1]*(p1[2]-p2[2]) ## A = y1(z2-z3)+y2(z3-z1)+y3(z1-z2)
32 | B = p1[2]*(p2[0]-p3[0])+p2[2]*(p3[0]-p1[0])+p3[2]*(p1[0]-p2[0]) ## B = z1(x2-x3)+z2(x3-x1)+z3(x1-x2)
33 | C = p1[0]*(p2[1]-p3[1])+p2[0]*(p3[1]-p1[1])+p3[0]*(p1[1]-p2[1]) ## C = x1(y2-y3)+x2(y3-y1)+x3(y1-y2)
34 | D = -1*A*p1[0]-B*p1[1]-C*p1[2] ## D = -A*x1-B*y1-C*z1
35 | #print A,B,C,D
36 | return (float(A),float(B),float(C),float(D))
37 |
38 | if __name__ == '__main__':
39 | #### INPUT ####
40 | ptsShp = r'E:\research\TIN-based\20150811\flat_triangle_pts.shp'
41 | #elevField = "ELEV"
42 | elevField = "Z"
43 | workspace = r'E:\research\TIN-based\20150811'
44 | #### END ####
45 |
46 | #### DEFAULT OUTPUT ####
47 | preprocessing_pts = workspace + os.sep + 'flat_triangle_new_point.shp'
48 | tin_origin_Shp = workspace + os.sep + 'flat_triangle_tin_origin.shp'
49 | preprocessing_tin = workspace + os.sep + 'flat_triangle_tin_preprocessed.shp'
50 | steepestpath_Shp = workspace + os.sep + 'test_steepestpath.shp'
51 | #### END ####
52 |
53 | #### GLOBAL VARIABLES ####
54 | VertexList = [] ## VertexList stores 3D coordinates (x,y,z) of all the input points
55 | TriangleVertexList = [] ## TriangleList stores all the triangles, each element stores index of vertexes
56 | TriangleNbrIdxList = [] ## TriangleNbrIdx stores index of triangle's neighbors, if there is not neighbor, set it None
57 | VertexTriangleList = [] ## VertexTriangleList stores every vertex's adjacent triangles in counterclockwise
58 | #### END ####
59 |
60 | #### TEMP VARIABLES ####
61 | pts2DList = [] ## temp list to store 2D coordinates of points
62 |
63 | #### END ####
64 |
65 | #### MAIN FUNCTIONS ####
66 | VertexList,pts2DList = ReadPoints(ptsShp,elevField) ## Read input shapefile of points
67 | ## Ready to construct hydrological TIN
68 | ## 1. Create Delaunay Triangulated Irregular Network
69 | ## 2. Remove Flat triangle by insert additional point using an inverse distance weighted interpolation with quadratic nodal functions
70 | ## 3. Remove pit by using a recursive algorithm
71 | ## 4. Handle flat edges by fliping operation
72 |
73 | TriangleVertexList,TriangleNbrIdxList,VertexTriangleList,VertexList = createTIN(VertexList,pts2DList)
74 | #print VertexList[len(VertexList)-1]
75 | WritePointShp(VertexList,elevField,preprocessing_pts)
76 | WritePolyonShp(TriangleVertexList,VertexList,tin_origin_Shp)
77 | del pts2DList
78 |
79 | ###
80 | # flatTriangle = [] ## store vertexes index of flat triangles
81 | # for tri in TriangleVertexList:
82 | # p1 = VertexList[tri[0]]
83 | # p2 = VertexList[tri[1]]
84 | # p3 = VertexList[tri[2]]
85 | # if p1[2] == p2[2] and p2[2] == p3[2]:
86 | # flatTriangle.append(tri)
87 | # #flatTriangle.append(TriangleVertexList.index(tri))
88 | # print flatTriangle
89 | # for flatT in flatTriangle:
90 | # for flatV in flatT:
91 | # ${0}
92 | # v = [[1,1,4],[1,-1,3],[-1,-1,2]]
93 | # A,B,C,D = trianglePlane(v[0],v[1],v[2])
94 | # print A,B,C,D
95 | # v0 = [1.,-1.,3.]
96 | # #v0 = [(v[0][0]+v[1][0]+v[2][0])/3.,(v[0][1]+v[1][1]+v[2][1])/3.,(v[0][2]+v[1][2]+v[2][2])/3.]
97 | # if C != 0: ## if C is 0, then the triangle is on the XY plane which need to be pitremoved!
98 | # intersect = findIntersectIdx(v,v0,A,B,C)
99 | # print intersect
100 |
101 |
102 |
103 |
104 | ###beifen###
105 | # fitPtsIdx = [] ## [[[,,]...],[[,,]...],[[,,]...]]
106 | # fitPtsIdx.append([tempPtsIdx[0]])
107 | # fitPtsIdx.append([tempPtsIdx[1]])
108 | # fitPtsIdx.append([tempPtsIdx[2]])
109 | # for i in range(3):
110 | # tempVertex = f.vertex(i)
111 | # cir_faces = dt.incident_faces(tempVertex)
112 | # finites_faces = []
113 | # f1 = cir_faces.next()
114 | # if dt.is_infinite(f1) == False:
115 | # finites_faces.append(f1)
116 | # for f2 in cir_faces:
117 | # if f2 == f1:
118 | # break
119 | # else:
120 | # if dt.is_infinite(f2) == False:
121 | # finites_faces.append(f2)
122 | # for f2 in finites_faces:
123 | # for j in range(3):
124 | # fitPtsIdx[i].append(pts2DList.index([f2.vertex(j).point()[0],f2.vertex(j).point()[1]]))
125 | # fitPtsIdxUnique = []
126 | # for temp in fitPtsIdx:
127 | # fitPtsIdxUnique.append(list(set(temp)))
128 | # print fitPtsIdxUnique
129 | # fitPtsCoor = []
130 | # for ptsIdx in fitPtsIdxUnique:
131 | # tempPtsCoor = []
132 | # for inividualIdx in ptsIdx:
133 | # tempPtsCoor.append(pts[inividualIdx])
134 | # fitPtsCoor.append(tempPtsCoor)
135 | # print fitPtsCoor
136 |
--------------------------------------------------------------------------------
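trianglePlane above computes the plane coefficients through three points with explicit cofactor formulas. A sketch of the equivalent cross-product form in NumPy, using the same sample triangle as the commented-out test (plane_from_points is an illustrative name):

    import numpy

    def plane_from_points(p1, p2, p3):
        p1, p2, p3 = map(numpy.asarray, (p1, p2, p3))
        normal = numpy.cross(p2 - p1, p3 - p1)  # normal vector = (A, B, C)
        D = -numpy.dot(normal, p1)              # D = -(A*x1 + B*y1 + C*z1)
        return normal[0], normal[1], normal[2], D

    A, B, C, D = plane_from_points([1, 1, 4], [1, -1, 3], [-1, -1, 2])
    print('A=%s B=%s C=%s D=%s' % (A, B, C, D))  # -> A=2 B=2 C=-4 D=12
    # when C != 0, the steepest-descent direction in the xy plane is along (A/C, B/C)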
/TIN_Hydro/data/test.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/data/test.dbf