├── .gitignore
├── AddNearAtrributesDirections
│   ├── AddNearAttributesDirections-CN-V1.0.py
│   ├── AddNearAttributesDirections-EN-V1.0.py
│   └── AddNearAttributesDirections.tbx
├── CSV2PtsShp
│   ├── CSV2PtsShp.py
│   └── designed_samples.csv
├── DelRepeatFeatures
│   └── DelRepeat.py
├── DownloadModis
│   ├── download_modis_nasa_earthdata.py
│   └── utils.py
├── ExtractMultiValue2Zones
│   ├── ExtMultiVal2Polygon.py
│   ├── ExtractMultiValue2Zones.py
│   └── ExtractMultiValue2Zones.tbx
├── ExtractRasterByMultiPolygon
│   ├── .idea
│   │   ├── ExtractRasterByMultiPolygon.iml
│   │   ├── encodings.xml
│   │   ├── misc.xml
│   │   ├── modules.xml
│   │   └── workspace.xml
│   └── ExtractRasterByMultiPolygon.py
├── HydroDataDownload
│   ├── CreateDatabase_SURF_CLI_CHN_MUL_DAY.py
│   ├── Down_Fujian_RealTimeData_Shuizhi.py
│   ├── Down_Fujian_shuizhi_zhoubao.py
│   ├── GetYcRainSum_20150805-zhulm.py
│   ├── ReadDatabase_SURF_CLI_CHN_MUL_DAY.py
│   ├── Read_hedao20150901 - zhulj.py
│   ├── Read_hedao20150901.py
│   ├── anhui_precipitation_download.py
│   ├── climate_download.py
│   ├── netcdf4_pydap_test.py
│   ├── test.py
│   └── trmm_download.py
├── Hydrograph
│   ├── Hydrograph-Storm.py
│   ├── ObsS.txt
│   ├── prec.txt
│   └── simuS.txt
├── NSGA2
│   ├── .idea
│   │   ├── NSGA2.iml
│   │   ├── encodings.xml
│   │   ├── misc.xml
│   │   ├── modules.xml
│   │   └── workspace.xml
│   ├── deap
│   │   ├── dbf_test.py
│   │   └── demo1.py
│   ├── inspyred
│   │   ├── nsga_example_inspyred.py
│   │   └── parallel_evaluation_pp_example.py
│   ├── nsga_example.py
│   └── parallel_evaluation_pp_example.py
├── README.md
├── RUSLE_LS
│   ├── RUSLE.tbx
│   ├── RUSLE_LS(Tool).py
│   └── RUSLE_LS_4_PC.AML
├── RillChannelExtraction
│   ├── IdentifyRillRidges.py
│   └── RillChannelExtraction.py
├── RillPy
│   ├── Hillslope.py
│   ├── Memo.py
│   ├── README.txt
│   ├── Rill.py
│   ├── ShoulderLine.py
│   ├── Subbasin.py
│   ├── Util.py
│   ├── __init__.py
│   └── main.py
├── SWAT_post_process
│   ├── Read_SWAT_Output_MDB.py
│   ├── Update_SWAT_mdb_from_SWAT_CUP.py
│   ├── __init__.py
│   └── stats_SWAT_Output_mdb.py
├── SWATplusUtility
│   ├── __init__.py
│   └── create_pond_points.py
├── TIN_Hydro
│   ├── HydroTIN.bak
│   ├── HydroTIN.py
│   ├── ShapefileIO.py
│   ├── User manual-zhulj-2016-2-20.docx
│   ├── XYZ2ShpPoint_GDAL.py
│   ├── _project
│   ├── backup
│   │   ├── CGAL-test.py
│   │   ├── fit.py
│   │   └── test.py
│   ├── data
│   │   ├── test.dbf
│   │   ├── test.sbn
│   │   ├── test.sbx
│   │   ├── test.shp
│   │   └── test.shx
│   ├── env
│   │   ├── Config.txt
│   │   ├── x64_python
│   │   │   └── CGAL-Python-0.9.4b1.win-amd64-py2.7.exe
│   │   └── x86_python
│   │       ├── CGAL-Python-0.9.4b1.win32-py2.7.exe
│   │       ├── GDAL-1.11.2-cp27-none-win32.whl
│   │       ├── alglib-3.10.0.cpython.gpl.zip
│   │       ├── pip-7.1.0.tar.gz
│   │       ├── setuptools-18.2.zip
│   │       └── vcredist_x86.exe
│   └── main.py
├── Util
│   ├── GeoTIFF_Converter.py
│   ├── HardenSlpPos_Compare.py
│   ├── Similarity_Compare.py
│   ├── TauDEM.py
│   ├── Util.py
│   ├── __init__.py
│   ├── available_font_matplotlib.py
│   ├── normalize_for_SoLIM_20141110.py
│   ├── pond_preprocess.py
│   ├── rasterCalculator.py
│   └── test_chinese_matplotlib.py
└── test
    ├── DEAP_tutorial.py
    ├── SCOOP_tutorial.py
    ├── TidyZotero.py
    ├── asc2tif.py
    ├── down_ts.py
    ├── down_ts_linux.py
    ├── mongoclient.py
    ├── numpy_test.py
    ├── pyqgis_test.py
    └── uniqueID_scoop.py

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 | 
26 | # PyInstaller
27 | # Usually these files are written by a python script from a template
28 | # before
PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | .hypothesis/ 46 | 47 | # Translations 48 | *.mo 49 | *.pot 50 | 51 | # Django stuff: 52 | *.log 53 | 54 | # Sphinx documentation 55 | # docs/_build/ 56 | 57 | # PyBuilder 58 | target/ 59 | 60 | #Ipython Notebook 61 | .ipynb_checkpoints 62 | 63 | #PyCharm Project files 64 | .idea/ 65 | .idea/* -------------------------------------------------------------------------------- /AddNearAtrributesDirections/AddNearAttributesDirections-CN-V1.0.py: -------------------------------------------------------------------------------- 1 | import arcpy 2 | from arcpy import env 3 | 4 | # --------------------------------------------------------------------------- 5 | # AddNearAttributesDirections.py 6 | # Created on: 2013-04-08 7 | # Author: Zhu Liangjun 8 | # --------------------------------------------------------------------------- 9 | 10 | 11 | #################### Inputs ######################## 12 | def setupNearAttributes(): 13 | poly_shp = arcpy.GetParameterAsText(0) 14 | nameField = arcpy.GetParameterAsText(1) 15 | fieldName = arcpy.GetParameterAsText(2) 16 | fieldLength = arcpy.GetParameterAsText(3) 17 | isDirection = arcpy.GetParameterAsText(4) 18 | Direct = arcpy.GetParameterAsText(5) 19 | outFile = arcpy.GetParameterAsText(6) 20 | arcpy.gp.overwriteOutput = 1 21 | shpDesc = arcpy.Describe(poly_shp) 22 | env.workspace = shpDesc.Path 23 | if isDirection: 24 | AddNearAttributesDirec(poly_shp,nameField,Direct,outFile,fieldName,fieldLength) 25 | else: 26 | AddNearAttributes(poly_shp,nameField,outFile,fieldName,fieldLength) 27 | 28 | ################### Functions ###################### 29 | def sendmsg(msg): 30 | print msg 31 | arcpy.AddMessage(msg) 32 | 33 | def CalFieldMappings(origial_shp,join_shp,nameField,fieldmappings,fieldName,Length): 34 | fieldmappings.addTable(origial_shp) 35 | AddNearPoly = arcpy.FieldMap() 36 | AddNearPoly.addInputField(join_shp,nameField) 37 | field = AddNearPoly.outputField 38 | field.name = fieldName 39 | field.aliasName = fieldName 40 | field.length = Length 41 | AddNearPoly.mergeRule = "Join" 42 | AddNearPoly.joinDelimiter = "," 43 | AddNearPoly.outputField = field 44 | fieldmappings.addFieldMap(AddNearPoly) 45 | ##sendmsg(fieldmappings.exportToString()) 46 | 47 | def AddNearAttributes(poly_shp,nameField,outFile,fieldName,fieldLength): 48 | arcpy.Copy_management(poly_shp,"temp_poly.shp") 49 | fieldmappings = arcpy.FieldMappings() 50 | CalFieldMappings(poly_shp,"temp_poly.shp",nameField,fieldmappings,fieldName,fieldLength) 51 | arcpy.SpatialJoin_analysis(poly_shp,"temp_poly.shp",outFile,"JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings,"INTERSECT", "", "") 52 | arcpy.DeleteField_management(outFile,["Join_Count","TARGET_FID"]) 53 | arcpy.CalculateField_management(outFile,fieldName,"Replace(["+fieldName+"],["+nameField+"]+\",\",\"\")","VB") 54 | arcpy.CalculateField_management(outFile,fieldName,"Replace(["+fieldName+"],\",\"+["+nameField+"],\"\")","VB") 55 | arcpy.CalculateField_management(outFile,fieldName,"Replace(["+fieldName+"],["+nameField+"],\"\")","VB") 56 | ## arcpy.CalculateField_management(outFile,"NearPoly","string.replace(!"+fieldName+"!,!"+nameField+"!+',','')","PYTHON") 57 | ## 
arcpy.CalculateField_management(outFile,"NearPoly","string.replace(!"+fieldName+"!,','+!"+nameField+"!,'')","PYTHON") 58 | ## arcpy.CalculateField_management(outFile,"NearPoly","string.replace(!"+fieldName+"!,!"+nameField+"!,'')","PYTHON") 59 | arcpy.Delete_management("temp_poly.shp") 60 | 61 | def AddNearAttributesDirec(poly_shp,nameField,Direct,outFile,fieldName,fieldLength): 62 | ##Define temporary files 63 | polytopoint_shp = "polytopoint.shp" 64 | pointneartab = "pointneartab" 65 | polyneartab = "polyneartab.dbf" 66 | 67 | try: 68 | arcpy.FeatureToPoint_management(poly_shp, polytopoint_shp, "INSIDE") 69 | arcpy.AddXY_management(polytopoint_shp) 70 | except: 71 | sendmsg(arcpy.GetMessages()) 72 | try: 73 | arcpy.GenerateNearTable_analysis(polytopoint_shp, polytopoint_shp, pointneartab, "", "NO_LOCATION", "ANGLE", "ALL", "0") 74 | arcpy.GenerateNearTable_analysis(poly_shp, poly_shp, "polyneartabTemp","0", "NO_LOCATION", "NO_ANGLE", "ALL", "0") 75 | shpDesc = arcpy.Describe(poly_shp) 76 | arcpy.TableToTable_conversion("polyneartabTemp",shpDesc.Path,polyneartab) 77 | except: 78 | sendmsg(arcpy.GetMessages()) 79 | try: 80 | arcpy.AddField_management(polyneartab,"near_link","TEXT") 81 | arcpy.AddField_management(polyneartab,"NameDirec","TEXT","","",80,"","","","") 82 | arcpy.AddField_management(polyneartab,"x","DOUBLE") 83 | arcpy.AddField_management(polyneartab,"y","DOUBLE") 84 | arcpy.AddField_management(polyneartab,"angle","DOUBLE") 85 | arcpy.CalculateField_management(polyneartab,"near_link","'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))","PYTHON") 86 | arcpy.AddField_management(pointneartab,"near_link","TEXT") 87 | arcpy.CalculateField_management(pointneartab,"near_link","'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))","PYTHON") 88 | except: 89 | sendmsg(arcpy.GetMessages()) 90 | try: 91 | arcpy.MakeTableView_management(polyneartab, "polyneartab_View") 92 | arcpy.AddJoin_management("polyneartab_View", "IN_FID", polytopoint_shp, "ORIG_FID", "KEEP_ALL") 93 | arcpy.CalculateField_management("polyneartab_View","X","!polytopoint.POINT_X!","PYTHON") 94 | arcpy.CalculateField_management("polyneartab_View","Y","!polytopoint.POINT_Y!","PYTHON") 95 | arcpy.RemoveJoin_management("polyneartab_View","polytopoint") 96 | 97 | arcpy.AddJoin_management("polyneartab_View","NEAR_FID",polytopoint_shp,"ORIG_FID","KEEP_ALL") 98 | ##arcpy.CalculateField_management("polyneartab_View","polyneartab:NameDirec","!polytopoint."+nameField+"!","PYTHON") 99 | arcpy.CalculateField_management("polyneartab_View","NameDirec","[polytopoint."+nameField+"]","VB") 100 | arcpy.RemoveJoin_management("polyneartab_View","polytopoint") 101 | 102 | arcpy.MakeTableView_management(pointneartab, "pointneartab_View") 103 | arcpy.AddJoin_management("polyneartab_View","NEAR_LINK","pointneartab_View","NEAR_LINK","KEEP_ALL") 104 | arcpy.CalculateField_management("polyneartab_View","ANGLE","!pointneartab:NEAR_ANGLE!","PYTHON") 105 | 106 | expression = "DefAngle(float(!angle!),str(!NameDirec!))" 107 | if Direct == "Four": 108 | codeblock = """ if Abs ( [angle] ) <= 45 then 109 | temp = [NameDirec]+\"(东)\" 110 | elseif [angle] > 45 and [angle] <= 135 then 111 | temp = [NameDirec]+\"(北)\" 112 | elseif Abs ( [angle] ) > 135 then 113 | temp = [NameDirec]+\"(西)\" 114 | else 115 | temp = [NameDirec]+\"(南)\" 116 | end if """ 117 | else: 118 | codeblock = """if Abs([angle])<=22.5 then 119 | temp = [NameDirec]+\"(东)\" 120 | elseif [angle]>22.5 and [angle]<=67.5 then 121 | temp = [NameDirec]+\"(东北)\" 122 | 
elseif [angle]>67.5 and [angle]<=112.5 then 123 | temp = [NameDirec]+\"(北)\" 124 | elseif [angle]>112.5 and [angle]<=157.5 then 125 | temp = [NameDirec]+\"(西北)\" 126 | elseif Abs([angle])>157.5 then 127 | temp = [NameDirec]+\"(西)\" 128 | elseif [angle]>-157.5 and [angle]<=-112.5 then 129 | temp = [NameDirec]+\"(西南)\" 130 | elseif [angle]>-112.5 and [angle]<=-67.5 then 131 | temp = [NameDirec]+\"(南)\" 132 | else 133 | temp = [NameDirec]+\"(东南)\" 134 | end if""" 135 | arcpy.CalculateField_management(polyneartab,"NameDirec","temp","VB",codeblock) 136 | except: 137 | sendmsg(arcpy.GetMessages()) 138 | ## Add XY data 139 | try: 140 | spatialRef = arcpy.Describe(poly_shp).spatialReference 141 | arcpy.MakeXYEventLayer_management(polyneartab,"x","y","tempLayer",spatialRef) 142 | arcpy.CopyFeatures_management("tempLayer","point.shp") 143 | except: 144 | sendmsg(arcpy.GetMessages()) 145 | try: 146 | ## Spatial Join 147 | fieldmappings = arcpy.FieldMappings() 148 | CalFieldMappings(poly_shp,"point.shp","NameDirec",fieldmappings,fieldName,fieldLength) 149 | arcpy.SpatialJoin_analysis(poly_shp, "point.shp", outFile, "JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings,"CONTAINS", "", "") 150 | arcpy.DeleteField_management(outFile,["Join_Count","TARGET_FID"]) 151 | except: 152 | sendmsg(arcpy.GetMessages()) 153 | 154 | ## Delete process data 155 | try: 156 | arcpy.Delete_management("polyneartabTemp") 157 | arcpy.Delete_management(pointneartab) 158 | arcpy.Delete_management(polyneartab) 159 | arcpy.Delete_management(polytopoint_shp) 160 | arcpy.Delete_management("point.shp") 161 | except: 162 | sendmsg(arcpy.GetMessages()) 163 | 164 | if __name__ == '__main__': 165 | setupNearAttributes() 166 | -------------------------------------------------------------------------------- /AddNearAtrributesDirections/AddNearAttributesDirections-EN-V1.0.py: -------------------------------------------------------------------------------- 1 | import arcpy 2 | from arcpy import env 3 | ##import string,sys 4 | ##reload(sys) 5 | ##sys.setdefaultencoding('utf8') 6 | ## --------------------------------------------------------------------------- 7 | # AddNearAttributesDirections.py 8 | # Created on: 2013-04-08 9 | # Author: Zhu Liangjun 10 | # --------------------------------------------------------------------------- 11 | #################### Inputs ######################## 12 | def setupNearAttributes(): 13 | poly_shp = arcpy.GetParameterAsText(0) 14 | nameField = arcpy.GetParameterAsText(1) 15 | fieldLength = arcpy.GetParameterAsText(2) 16 | isDirection = arcpy.GetParameterAsText(3) 17 | Direct = arcpy.GetParameterAsText(4) 18 | outFile = arcpy.GetParameterAsText(5) 19 | arcpy.gp.overwriteOutput = 1 20 | shpDesc = arcpy.Describe(poly_shp) 21 | env.workspace = shpDesc.Path 22 | if isDirection: 23 | AddNearAttributesDirec(poly_shp,nameField,Direct,outFile,fieldLength) 24 | else: 25 | AddNearAttributes(poly_shp,nameField,outFile,fieldLength) 26 | 27 | ################### Functions ###################### 28 | def sendmsg(msg): 29 | print msg 30 | arcpy.AddMessage(msg) 31 | 32 | def CalFieldMappings(origial_shp,join_shp,nameField,fieldmappings,Length): 33 | fieldmappings.addTable(origial_shp) 34 | AddNearPoly = arcpy.FieldMap() 35 | AddNearPoly.addInputField(join_shp,nameField) 36 | field = AddNearPoly.outputField 37 | field.name = "NearPoly" 38 | field.aliasName = "NearPoly" 39 | field.length = Length 40 | AddNearPoly.mergeRule = "Join" 41 | AddNearPoly.joinDelimiter = "," 42 | AddNearPoly.outputField = field 43 | 
fieldmappings.addFieldMap(AddNearPoly) 44 | ##sendmsg(fieldmappings.exportToString()) 45 | 46 | def AddNearAttributes(poly_shp,nameField,outFile,fieldLength): 47 | arcpy.Copy_management(poly_shp,"temp_poly.shp") 48 | fieldmappings = arcpy.FieldMappings() 49 | CalFieldMappings(poly_shp,"temp_poly.shp",nameField,fieldmappings,fieldLength) 50 | arcpy.SpatialJoin_analysis(poly_shp,"temp_poly.shp",outFile,"JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings,"INTERSECT", "", "") 51 | arcpy.DeleteField_management(outFile,["Join_Count","TARGET_FID"]) 52 | arcpy.CalculateField_management(outFile,"NearPoly","Replace([NearPoly],["+nameField+"]+\",\",\"\")","VB") 53 | arcpy.CalculateField_management(outFile,"NearPoly","Replace([NearPoly],\",\"+["+nameField+"],\"\")","VB") 54 | arcpy.CalculateField_management(outFile,"NearPoly","Replace([NearPoly],["+nameField+"],\"\")","VB") 55 | ## arcpy.CalculateField_management(outFile,"NearPoly","string.replace(!NearPoly!,!"+nameField+"!+',','')","PYTHON") 56 | ## arcpy.CalculateField_management(outFile,"NearPoly","string.replace(!NearPoly!,','+!"+nameField+"!,'')","PYTHON") 57 | ## arcpy.CalculateField_management(outFile,"NearPoly","string.replace(!NearPoly!,!"+nameField+"!,'')","PYTHON") 58 | arcpy.Delete_management("temp_poly.shp") 59 | 60 | def AddNearAttributesDirec(poly_shp,nameField,Direct,outFile,fieldLength): 61 | ##Define temporary files 62 | polytopoint_shp = "polytopoint.shp" 63 | pointneartab = "pointneartab" 64 | polyneartab = "polyneartab.dbf" 65 | 66 | try: 67 | arcpy.FeatureToPoint_management(poly_shp, polytopoint_shp, "INSIDE") 68 | arcpy.AddXY_management(polytopoint_shp) 69 | except: 70 | sendmsg(arcpy.GetMessages()) 71 | try: 72 | arcpy.GenerateNearTable_analysis(polytopoint_shp, polytopoint_shp, pointneartab, "", "NO_LOCATION", "ANGLE", "ALL", "0") 73 | arcpy.GenerateNearTable_analysis(poly_shp, poly_shp, "polyneartabTemp","0", "NO_LOCATION", "NO_ANGLE", "ALL", "0") 74 | shpDesc = arcpy.Describe(poly_shp) 75 | arcpy.TableToTable_conversion("polyneartabTemp",shpDesc.Path,polyneartab) 76 | except: 77 | sendmsg(arcpy.GetMessages()) 78 | try: 79 | arcpy.AddField_management(polyneartab,"near_link","TEXT") 80 | arcpy.AddField_management(polyneartab,"NameDirec","TEXT","","",80,"","","","") 81 | arcpy.AddField_management(polyneartab,"x","DOUBLE") 82 | arcpy.AddField_management(polyneartab,"y","DOUBLE") 83 | arcpy.AddField_management(polyneartab,"angle","DOUBLE") 84 | arcpy.CalculateField_management(polyneartab,"near_link","'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))","PYTHON") 85 | arcpy.AddField_management(pointneartab,"near_link","TEXT") 86 | arcpy.CalculateField_management(pointneartab,"near_link","'{0:0>5}'.format(str(!IN_FID!))+'{0:0>5}'.format(str(!NEAR_FID!))","PYTHON") 87 | except: 88 | sendmsg(arcpy.GetMessages()) 89 | try: 90 | arcpy.MakeTableView_management(polyneartab, "polyneartab_View") 91 | arcpy.AddJoin_management("polyneartab_View", "IN_FID", polytopoint_shp, "ORIG_FID", "KEEP_ALL") 92 | arcpy.CalculateField_management("polyneartab_View","X","!polytopoint.POINT_X!","PYTHON") 93 | arcpy.CalculateField_management("polyneartab_View","Y","!polytopoint.POINT_Y!","PYTHON") 94 | arcpy.RemoveJoin_management("polyneartab_View","polytopoint") 95 | 96 | arcpy.AddJoin_management("polyneartab_View","NEAR_FID",polytopoint_shp,"ORIG_FID","KEEP_ALL") 97 | ##arcpy.CalculateField_management("polyneartab_View","polyneartab:NameDirec","!polytopoint."+nameField+"!","PYTHON") 98 | 
arcpy.CalculateField_management("polyneartab_View","NameDirec","[polytopoint."+nameField+"]","VB") 99 | arcpy.RemoveJoin_management("polyneartab_View","polytopoint") 100 | 101 | arcpy.MakeTableView_management(pointneartab, "pointneartab_View") 102 | arcpy.AddJoin_management("polyneartab_View","NEAR_LINK","pointneartab_View","NEAR_LINK","KEEP_ALL") 103 | arcpy.CalculateField_management("polyneartab_View","ANGLE","!pointneartab:NEAR_ANGLE!","PYTHON") 104 | 105 | expression = "DefAngle(float(!angle!),str(!NameDirec!))" 106 | if Direct == "Four": 107 | codeblock = """def DefAngle(angle,name): 108 | if abs(angle)<=45: 109 | return name+'(East)' 110 | elif angle>45 and angle<=135: 111 | return name+'(North)' 112 | elif abs(angle)>135: 113 | return name+'(West)' 114 | else: 115 | return name+'(South)'""" 116 | else: 117 | codeblock = """def DefAngle(angle,name): 118 | if abs(angle)<=22.5: 119 | return name+'(East)' 120 | elif angle>22.5 and angle<=67.5: 121 | return name+'(NorthEast)' 122 | elif angle>67.5 and angle<=112.5: 123 | return name+'(North)' 124 | elif angle>112.5 and angle<=157.5: 125 | return name+'(NorthWest)' 126 | elif abs(angle)>157.5: 127 | return name+'(West)' 128 | elif angle>-157.5 and angle<=-112.5: 129 | return name+'(SouthWest)' 130 | elif angle>-112.5 and angle<=-67.5: 131 | return name+'(South)' 132 | else: 133 | return name+'(SouthEast)'""" 134 | ##codeblock = (codeblock.decode('utf-8')).encode('gb2312') 135 | arcpy.CalculateField_management(polyneartab,"NameDirec",expression,"PYTHON",codeblock) 136 | except: 137 | sendmsg(arcpy.GetMessages()) 138 | ## Add XY data 139 | try: 140 | spatialRef = arcpy.Describe(poly_shp).spatialReference 141 | arcpy.MakeXYEventLayer_management(polyneartab,"x","y","tempLayer",spatialRef) 142 | arcpy.CopyFeatures_management("tempLayer","point.shp") 143 | except: 144 | sendmsg(arcpy.GetMessages()) 145 | try: 146 | ## Spatial Join 147 | fieldmappings = arcpy.FieldMappings() 148 | CalFieldMappings(poly_shp,"point.shp","NameDirec",fieldmappings,fieldLength) 149 | arcpy.SpatialJoin_analysis(poly_shp, "point.shp", outFile, "JOIN_ONE_TO_ONE", "KEEP_ALL", fieldmappings,"CONTAINS", "", "") 150 | arcpy.DeleteField_management(outFile,["Join_Count","TARGET_FID"]) 151 | except: 152 | sendmsg(arcpy.GetMessages()) 153 | 154 | ## Delete process data 155 | try: 156 | arcpy.Delete_management("polyneartabTemp") 157 | arcpy.Delete_management(pointneartab) 158 | arcpy.Delete_management(polyneartab) 159 | arcpy.Delete_management(polytopoint_shp) 160 | arcpy.Delete_management("point.shp") 161 | except: 162 | sendmsg(arcpy.GetMessages()) 163 | 164 | if __name__ == '__main__': 165 | setupNearAttributes() 166 | -------------------------------------------------------------------------------- /AddNearAtrributesDirections/AddNearAttributesDirections.tbx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/AddNearAtrributesDirections/AddNearAttributesDirections.tbx -------------------------------------------------------------------------------- /CSV2PtsShp/CSV2PtsShp.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | ## Author : Liangjun Zhu 3 | ## Email : crazyzlj@gmail.com 4 | ## Date : 2015-1-23 5 | ## Usage : Convert a .csv filetype points file to a vector shapefile 6 | ## put this .py file in the same folder, input the file name and 7 | ## x,y column name. 
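##          e.g., the bundled designed_samples.csv uses coordinate columns named
##          RecommendedX and RecommendedY; any CSV works as long as the names
##          passed for X and Y below match its header row.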
8 | import os,sys 9 | import arcpy 10 | from arcpy import env 11 | 12 | def currentPath(): 13 | path = sys.path[0] 14 | if os.path.isdir(path): 15 | return path 16 | elif os.path.isfile(path): 17 | return os.path.dirname(path) 18 | 19 | def CSV2PtsShp(CSVFile,X,Y): 20 | env.workspace = os.path.dirname(CSVFile) 21 | PtsShp = os.path.basename(CSVFile) 22 | PtsShp = PtsShp.split('.')[-2] + ".shp" 23 | print PtsShp 24 | try: 25 | arcpy.MakeXYEventLayer_management(CSVFile,X,Y,"tempLayer","","") 26 | arcpy.CopyFeatures_management("tempLayer",PtsShp) 27 | except: 28 | print arcpy.GetMessages() 29 | arcpy.AddMessage(arcpy.GetMessages()) 30 | 31 | print os.path.dirname(CSVFile) 32 | print "%s Convert to Shp Done!" % CSVFile 33 | 34 | if __name__ == '__main__': 35 | CSVName = "designed_samples.csv" 36 | XName = "RecommendedX" 37 | YName = "RecommendedY" 38 | currFolder = currentPath() 39 | CSVFile = currFolder + os.sep + CSVName 40 | CSV2PtsShp(CSVFile,XName,YName) 41 | -------------------------------------------------------------------------------- /CSV2PtsShp/designed_samples.csv: -------------------------------------------------------------------------------- 1 | Stability,PatternID,TotalArea,RecommendedX,RecommendedY,Ave.Membership 2 | 3,1,3102,431103.875000,3488299.000000,0.987639 3 | 3,1,3102,429103.875000,3469299.000000,0.986404 4 | 3,1,3102,697103.875000,3415299.000000,0.986380 5 | 3,2,996,437103.875000,3472299.000000,0.996329 6 | 3,2,996,466103.875000,3458299.000000,0.993498 7 | 3,2,996,623103.875000,3619299.000000,0.993234 8 | 3,5,768,616103.875000,3338299.000000,0.964042 9 | 3,5,768,415103.875000,3410299.000000,0.962403 10 | 3,5,768,390103.875000,3450299.000000,0.961458 11 | 3,4,376,728103.875000,3408299.000000,0.866977 12 | 3,4,376,363103.875000,3502299.000000,0.866923 13 | 3,4,376,587103.875000,3379299.000000,0.863143 14 | 3,3,150,420103.875000,3494299.000000,0.830402 15 | 3,3,150,361103.875000,3468299.000000,0.829132 16 | 3,3,150,425103.875000,3490299.000000,0.827112 17 | 3,6,8,450103.875000,3450299.000000,0.810822 18 | 3,6,8,401103.875000,3449299.000000,0.807702 19 | 3,6,8,389103.875000,3402299.000000,0.807020 -------------------------------------------------------------------------------- /DelRepeatFeatures/DelRepeat.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | #------------------------------------------------------------------------------- 3 | # Name: DelRepeat 4 | # Description: 5 | # 6 | # Created: 2013-3-29 7 | # Author: gjl 8 | # Contact: gjl 9 | #------------------------------------------------------------------------------- 10 | import arcpy 11 | 12 | def DelRepeat(inputFeatureClass, outputFeatureClass): 13 | #copy feature 14 | fc = outputFeatureClass 15 | arcpy.CopyFeatures_management(inputFeatureClass, fc) 16 | 17 | #unique id 18 | if len(arcpy.ListFields(fc, "TempId")) <= 0: 19 | arcpy.AddField_management(fc, "TempId", "LONG") 20 | if len(arcpy.ListFields(fc, "TempMark")) <= 0: 21 | arcpy.AddField_management(fc, "TempMark", "STRING") 22 | num = 1 23 | cursor = arcpy.UpdateCursor(fc) 24 | for row in cursor: 25 | row.TempId = num 26 | row.TempMark = "N" 27 | num +=1 28 | cursor.updateRow(row) 29 | del row 30 | del cursor 31 | 32 | #find repeat polygon 33 | repeat = [] 34 | cursor1 = arcpy.SearchCursor(fc) 35 | for row1 in cursor1: 36 | if row1.TempMark == "N": 37 | geom1 = row1.shape 38 | cursor2 = arcpy.UpdateCursor(fc) 39 | for row2 in cursor2: 40 | geom2 = row2.shape 41 | if geom1.equals(geom2) and 
(row2.TempId != row1.TempId): 42 | row2.TempMark = "Y" 43 | repeat.append(row2.TempId) 44 | cursor2.updateRow(row2) 45 | del row2 46 | del cursor2 47 | del row1 48 | del cursor1 49 | print repeat 50 | 51 | #delete repeat polygon 52 | arcpy.MakeFeatureLayer_management(fc, "layer") 53 | sql = "" 54 | for r in repeat: 55 | if sql == "": 56 | sql += "\"TempId\" = " + str(r) 57 | else: 58 | sql += " OR \"TempId\" = " + str(r) 59 | print sql 60 | arcpy.SelectLayerByAttribute_management("layer", "NEW_SELECTION", sql) 61 | arcpy.DeleteFeatures_management("layer") 62 | arcpy.DeleteField_management(fc, "TempId") 63 | arcpy.DeleteField_management(fc, "TempMark") 64 | 65 | def main(): 66 | inFC = arcpy.GetParameterAsText(0) 67 | outFC = arcpy.GetParameterAsText(1) 68 | DelRepeat(inFC, outFC) 69 | 70 | if __name__ == "__main__": 71 | main() -------------------------------------------------------------------------------- /DownloadModis/download_modis_nasa_earthdata.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Author: Liangjun Zhu 3 | # Date : 2017-3-13 4 | # Email : zlj@lreis.ac.cn 5 | # Blog : zhulj.net 6 | 7 | from utils import * 8 | import urllib2 9 | from bs4 import BeautifulSoup 10 | 11 | import ssl 12 | from functools import wraps 13 | 14 | 15 | def sslwrap(func): 16 | @wraps(func) 17 | def bar(*args, **kw): 18 | kw['ssl_version'] = ssl.PROTOCOL_TLSv1 19 | return func(*args, **kw) 20 | 21 | return bar 22 | 23 | 24 | ssl.wrap_socket = sslwrap(ssl.wrap_socket) 25 | 26 | 27 | def chunk_report(mbytes_so_far, total_size): 28 | if total_size > 0: 29 | percent = float(mbytes_so_far) / total_size 30 | percent = round(percent * 100, 2) 31 | sys.stdout.write("Downloaded %.3f of %.3f Mb (%0.2f%%)\r" % 32 | (mbytes_so_far, total_size, percent)) 33 | if mbytes_so_far >= total_size: 34 | sys.stdout.write('\n') 35 | else: 36 | pass # currently, do nothing 37 | 38 | 39 | def chunk_read(response, chunk_size=8192, savepath=None, report_hook=None): 40 | try: 41 | total_size = response.info().getheader('content-length').strip() 42 | total_size = float(total_size) / 1024. / 1024. 43 | except AttributeError: 44 | total_size = 0. 45 | bytes_so_far = 0 46 | 47 | while True: 48 | chunk = response.read(chunk_size) 49 | bytes_so_far += len(chunk) / 1024. / 1024. 
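        # Note: despite its name, bytes_so_far accumulates megabytes (len(chunk)/1024/1024)
        # so that it matches the MB-based total_size passed to chunk_report().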
50 | if not chunk: 51 | break 52 | if savepath is not None: 53 | savedata2file(chunk, savepath) 54 | if report_hook: 55 | report_hook(bytes_so_far, total_size) 56 | return bytes_so_far 57 | 58 | 59 | def downMODISfromNASAEarthdata(productname, **kwargs): 60 | from cookielib import CookieJar 61 | downUrl = 'https://e4ftl01.cr.usgs.gov/MOLT/' 62 | prefix = productname.split('.')[0] 63 | version = productname.split('.')[1] 64 | usrname = '' 65 | pwd = '' 66 | startdate = datetime.datetime.today() 67 | enddate = datetime.datetime.today() 68 | h = 0 69 | v = 8 70 | deltaday = 8 71 | outpath = '' 72 | # try to get the required key-values, or throw exception 73 | try: 74 | usrname = kwargs["usrname"] 75 | pwd = kwargs["pwd"] 76 | startdate = kwargs["startdate"] 77 | enddate = kwargs["enddate"] 78 | h = kwargs["h"] 79 | v = kwargs["v"] 80 | deltaday = kwargs["deltaday"] 81 | outpath = kwargs["workspace"] 82 | except KeyError: 83 | print ("downMODISfromNASAEarthdata function must have the usrname, pwd, startdate, and enddate args.") 84 | # try to get optional key-values 85 | logfile = None 86 | if 'log' in kwargs.keys(): 87 | logfile = kwargs['log'] 88 | delfile(logfile) 89 | 90 | authorizeUrl = "https://urs.earthdata.nasa.gov" 91 | # Create a password manager to deal with the 401 response that is returned from authorizeUrl 92 | password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() 93 | password_manager.add_password(None, authorizeUrl, usrname, pwd) 94 | cookie_jar = CookieJar() 95 | opener = urllib2.build_opener( 96 | urllib2.HTTPBasicAuthHandler(password_manager), 97 | # urllib2.HTTPHandler(debuglevel=1), # Uncomment these two lines to see 98 | # urllib2.HTTPSHandler(debuglevel=1), # details of the requests/responses 99 | urllib2.HTTPCookieProcessor(cookie_jar)) 100 | urllib2.install_opener(opener) 101 | 102 | tmpdate = startdate 103 | while tmpdate <= enddate: 104 | curdownUrl = downUrl + productname + '/' + tmpdate.strftime("%Y.%m.%d") + '/' 105 | print curdownUrl 106 | itemsList = read_url(curdownUrl) 107 | curItem = prefix + '.A%d%03d.h%02dv%02d.' % (tmpdate.year, doy(tmpdate), h, v) + version 108 | found, curItemUrls = locateStringInList(curItem, itemsList) 109 | if not found: 110 | print ("File %s not found!" 
% curItem) 111 | continue 112 | for curItemUrl in curItemUrls: 113 | tmpfile = outpath + os.sep + os.path.split(curItemUrl)[1] 114 | delfile(tmpfile) 115 | try: 116 | print2log(curItemUrl, logfile = logfile) 117 | request = urllib2.Request(curItemUrl) 118 | response = urllib2.urlopen(request) 119 | chunk_read(response, savepath = tmpfile, report_hook = chunk_report) 120 | except urllib2.HTTPError or urllib2.URLError, e: 121 | print e.code 122 | tmpdate += datetime.timedelta(days = deltaday) 123 | 124 | 125 | def read_url(url): 126 | url = url.replace(" ", "%20") 127 | try: 128 | req = urllib2.Request(url) 129 | a = urllib2.urlopen(req).read() 130 | soup = BeautifulSoup(a, 'html.parser') 131 | x = (soup.find_all('a')) 132 | allurl = [] 133 | for i in x: 134 | file_name = i.extract().get_text() 135 | url_new = url + file_name 136 | url_new = url_new.replace(" ", "%20") 137 | allurl.append(url_new) 138 | return allurl 139 | except urllib2.HTTPError or urllib2.URLError, e: 140 | print e.code 141 | 142 | 143 | if __name__ == '__main__': 144 | DOWN_PATH = r'D:\tmp' 145 | product = "MOD15A2H.006" 146 | usrname = 'your_user_name' 147 | pwd = 'your_password' 148 | startdate = [2002, 2, 18] # year, month, day 149 | enddate = [2002, 3, 5] 150 | deltaday = 8 151 | h = 1 152 | v = 11 153 | log = DOWN_PATH + os.sep + product + '.log' 154 | downMODISfromNASAEarthdata(product, usrname = usrname, pwd = pwd, 155 | startdate = list2datetime(startdate), 156 | enddate = list2datetime(enddate), 157 | deltaday = deltaday, h = h, v = v, 158 | workspace = DOWN_PATH, log = log) 159 | -------------------------------------------------------------------------------- /DownloadModis/utils.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Author: Liangjun Zhu 3 | # Date : 2017-1-11 4 | # Email : zlj@lreis.ac.cn 5 | # Blog : zhulj.net 6 | 7 | import os 8 | import sys 9 | import time 10 | import datetime 11 | import subprocess 12 | 13 | 14 | def currentPath(): 15 | path = sys.path[0] 16 | if os.path.isdir(path): 17 | return path 18 | elif os.path.isfile(path): 19 | return os.path.dirname(path) 20 | 21 | 22 | def mkdir(dirname): 23 | if not os.path.isdir(dirname): 24 | os.mkdir(dirname) 25 | 26 | 27 | def savedata2file(data, filepath): 28 | with open(filepath, "ab") as code: 29 | code.write(data) 30 | 31 | 32 | def StringMatch(str1, str2): 33 | if str1.lower() == str2.lower(): 34 | return True 35 | else: 36 | return False 37 | 38 | 39 | def list2datetime(datelist): 40 | try: 41 | if len(datelist) == 1: 42 | return datetime.datetime(datelist[0]) 43 | elif len(datelist) == 2: 44 | return datetime.datetime(datelist[0], datelist[1]) 45 | elif len(datelist) == 3: 46 | return datetime.datetime(datelist[0], datelist[1], datelist[2]) 47 | elif len(datelist) == 4: 48 | return datetime.datetime(datelist[0], datelist[1], datelist[2], datelist[3]) 49 | elif len(datelist) == 5: 50 | return datetime.datetime(datelist[0], datelist[1], datelist[2], datelist[3], datelist[4]) 51 | except TypeError: 52 | print ("Invalid inputs for datetime!") 53 | 54 | 55 | def isfileexist(filepath): 56 | if os.path.exists(filepath) and os.path.isfile(filepath): 57 | return True 58 | else: 59 | return False 60 | 61 | 62 | def delfile(filepath): 63 | if isfileexist(filepath): 64 | os.remove(filepath) 65 | 66 | 67 | def IsLeapYear(year): 68 | if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): 69 | return True 70 | else: 71 | return False 72 | 73 | 74 | def GetDayNumber(year, month): 75 | if 
month in [1, 3, 5, 7, 8, 10, 12]: 76 | return 31 77 | elif month in [4, 6, 9, 11]: 78 | return 30 79 | elif IsLeapYear(year): 80 | return 29 81 | else: 82 | return 28 83 | 84 | 85 | def doy(dt): 86 | sec = time.mktime(dt.timetuple()) 87 | t = time.localtime(sec) 88 | return t.tm_yday 89 | 90 | 91 | def print2log(msg, print2screen=True, logfile=None): 92 | if logfile is not None: 93 | f = open(logfile, 'a') 94 | f.write(msg) 95 | f.close() 96 | if print2screen: 97 | print (msg) 98 | 99 | 100 | def isnumerical(x): 101 | try: 102 | xx = float(x) 103 | except TypeError: 104 | return False 105 | except ValueError: 106 | return False 107 | except 'Exception': 108 | return False 109 | else: 110 | return True 111 | 112 | 113 | def runcommand(commands): 114 | """ 115 | Execute external command, and return the output lines list 116 | :param commands: string or list 117 | :return: output lines 118 | """ 119 | print (commands) 120 | use_shell = True 121 | if isinstance(commands, list) or isinstance(commands, tuple): 122 | use_shell = False 123 | process = subprocess.Popen(commands, shell = use_shell, stdout = subprocess.PIPE, stdin = open(os.devnull), 124 | stderr = subprocess.STDOUT, universal_newlines = True) 125 | return process.stdout.readlines() 126 | 127 | 128 | def zipfiles(filenames, zip_file): 129 | commands = ['python', '-m', 'zipfile', '-c', zip_file] 130 | if filenames: 131 | for filename in filenames: 132 | commands.append(filename) 133 | runcommand(commands) 134 | 135 | 136 | def locateStringInList(str, strlist): 137 | found = False 138 | foundstr = [] 139 | for tmpstr in strlist: 140 | if str in tmpstr: 141 | found = True 142 | foundstr.append(tmpstr) 143 | return found, foundstr 144 | -------------------------------------------------------------------------------- /ExtractMultiValue2Zones/ExtMultiVal2Polygon.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/ExtractMultiValue2Zones/ExtMultiVal2Polygon.py -------------------------------------------------------------------------------- /ExtractMultiValue2Zones/ExtractMultiValue2Zones.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tool Name: MultiValue2Zones 3 | Source Code: MultiValue2Zones.py 4 | Version: v1.0 based on ArcGIS 10.x 5 | Author: Liangjun Zhu 6 | Contact: crazyzlj@gmail.com 7 | Start Date: 2012/12/14 8 | Revised Date : 2015/1/16 9 | 10 | This script will statistic the value of given rasters within the 11 | zones of another polygon shapefile and report the results to a 12 | CSV file. 13 | This script can calculate values included "MEAN","MAJORITY", 14 | "MAXIMUM","MEDIAN","MINIMUM","MINORITY","RANGE","STD","SUM", 15 | "VARIETY". Each raster's value will be appended to the origin 16 | shapefile's attribute table and named by the corresponding 17 | raster's name. 18 | """ 19 | ################### Imports ######################## 20 | import os,sys,arcpy,string 21 | from arcpy.sa import * 22 | from arcpy.management import * 23 | from arcpy import env 24 | 25 | #################### Inputs ######################## 26 | def currentPath(): 27 | path = sys.path[0] 28 | if os.path.isdir(path): 29 | return path 30 | elif os.path.isfile(path): 31 | return os.path.dirname(path) 32 | 33 | def setupMultiVal2Poly(): 34 | 35 | ## The default set is: 1. DEM in workspace\\DEM; 2. params layers in workspace\\params; 36 | ## 3. 
Reclassify DEM for statistics zones ## 37 | 38 | demfolder = currentPath() + "\\DEM" 39 | paramfolder = currentPath() + "\\params" 40 | resultfolder = currentPath() + "\\results" 41 | #print resultfolder 42 | if not os.path.exists(resultfolder): 43 | os.mkdir(resultfolder) 44 | if os.path.exists(demfolder): 45 | arcpy.env.workspace = demfolder 46 | else: 47 | print "Please make a 'DEM' folder which contains the DEM file." 48 | raw_input() 49 | exit() 50 | arcpy.gp.overwriteOutput = 1 51 | arcpy.CheckOutExtension("Spatial") 52 | if arcpy.ListRasters("*","ALL") == []: 53 | print "Please check the DEM folder to make sure the existence of DEM raster file." 54 | raw_input() 55 | exit() 56 | else: 57 | cls = range(0,8100,100) 58 | classifyIdx = [] 59 | for i in range(len(cls)-1): 60 | classifyIdx.append([cls[i],cls[i+1],i]) 61 | #classifyIdx.append([7000,8000,len(cls)]) 62 | print "The reclassification of DEM is :" 63 | print classifyIdx 64 | 65 | for DEMfile in arcpy.ListRasters("*","ALL"): 66 | print "Reclassify the DEM raster..." 67 | outReclass = Reclassify(DEMfile, "Value",RemapRange(classifyIdx)) 68 | DEMcls = resultfolder + "\\DEMcls" 69 | outReclass.save(DEMcls) 70 | DEMclsShp = resultfolder + "\\DEMcls.shp" 71 | arcpy.RasterToPolygon_conversion(DEMcls, DEMclsShp, "NO_SIMPLIFY","VALUE") 72 | DEMclsDis = resultfolder + "\\DEMclsDis.shp" 73 | arcpy.Dissolve_management(DEMclsShp,DEMclsDis,"GRIDCODE","","MULTI_PART","") 74 | #break 75 | 76 | OriginShp = DEMclsDis 77 | ZoneField = "GRIDCODE" 78 | IgnoreNodata = "DATA" 79 | SummarizeVal = ["MEAN","MAJORITY","MAXIMUM","MEDIAN","MINIMUM","RANGE","STD","SUM","VARIETY"] 80 | JoinType = "" 81 | RasterFolder = paramfolder 82 | outFolder = resultfolder 83 | ## End the default setting ## 84 | 85 | ## If you want to use this tool in Arctoolbox, the code above should be replaced by below ## 86 | OriginShp = arcpy.GetParameterAsText(0) 87 | ZoneField = arcpy.GetParameterAsText(1) 88 | IgnoreNodata = arcpy.GetParameterAsText(2) 89 | SummarizeVal = arcpy.GetParameterAsText(3) 90 | JoinType = arcpy.GetParameterAsText(4) 91 | RasterFolder = arcpy.GetParameterAsText(5) 92 | outFolder = arcpy.GetParameterAsText(6) 93 | outFileName = arcpy.GetParameterAsText(7) 94 | ## End of code for Arctoolbox's input information ## 95 | arcpy.gp.overwriteOutput = 1 96 | for SummVal in SummarizeVal: 97 | print "Calculating the Index of %s..." % SummVal 98 | MultiVal2Poly(OriginShp,ZoneField,IgnoreNodata,SummVal,JoinType,RasterFolder,outFolder,SummVal) 99 | print "All mission done sucessfully!" 
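## Note: when run as an ArcToolbox script tool, the GetParameterAsText() block above
## overrides the hard-coded default settings; GetParameterAsText() returns plain text,
## so a multi-value statistics parameter arrives as a single (typically
## semicolon-delimited) string rather than the Python list defined in the defaults.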
100 | 101 | ################### Functions ###################### 102 | 103 | def ListFields(FileLayer): 104 | fields = arcpy.gp.listFields(FileLayer) 105 | fieldList = [] 106 | for field in fields: 107 | fieldList.append([str(field.name),str(field.type)]) 108 | return fieldList 109 | def AddCalDelField(ShpFile,AddName,CalName,FieldDataType): 110 | arcpy.AddField_management(ShpFile,AddName,FieldDataType) 111 | arcpy.CalculateField_management(ShpFile,AddName,"!"+CalName+"!","PYTHON") 112 | arcpy.DeleteField_management(ShpFile,CalName) 113 | def SaveShpAsCSV(ShpFile,OutDir,OutputName): 114 | fields = arcpy.gp.listFields(ShpFile) 115 | fieldList2 = [] 116 | for field in fields: 117 | if field.name != "Shape": 118 | fieldList2.append(str(field.name)) 119 | #print fieldList2 120 | try: 121 | if not os.path.exists(OutDir+"\\"+OutputName+".csv"): 122 | arcpy.ExportXYv_stats(ShpFile,fieldList2,"COMMA",OutDir+"\\"+OutputName+".csv","ADD_FIELD_NAMES") 123 | else: 124 | os.remove(OutDir+"\\"+OutputName+".csv") 125 | arcpy.ExportXYv_stats(ShpFile,fieldList2,"COMMA",OutDir+"\\"+OutputName+".csv","ADD_FIELD_NAMES") 126 | except: 127 | errorStr = arcpy.gp.GetMessages() 128 | def MultiVal2Poly(OriginShp,ZoneField,IgnoreNodata,SummarizeVal,JoinType,RasterFolder,outFolder,outFileName): 129 | if outFolder == "": 130 | outFolder == RasterFolder 131 | if os.path.exists(RasterFolder): 132 | arcpy.env.workspace = RasterFolder 133 | else: 134 | print "Please make a 'params' folder which contains the parameter files." 135 | raw_input() 136 | exit() 137 | arcpy.Copy_management(OriginShp,"p.shp") 138 | DropFields = ["ZonSAT_OID","ZonSAT_STU","ZonSAT_ZON","ZonSAT_GRI","ZonSAT_COU","ZonSAT_ARE"] 139 | if arcpy.ListRasters("*","ALL") == []: 140 | print "Please check the DEM folder to make sure the existence of DEM raster file." 141 | raw_input() 142 | exit() 143 | for rasterFile in arcpy.ListRasters("*","ALL"): 144 | print " Handing the %s parameter raster " % rasterFile 145 | zoneShp = "p.shp" 146 | curFileName = os.path.splitext(rasterFile)[0] 147 | try: 148 | arcpy.CheckOutExtension("Spatial") 149 | ZonSAT = ZonalStatisticsAsTable(zoneShp,ZoneField,rasterFile,"ZonSAT.dbf",IgnoreNodata,SummarizeVal) 150 | arcpy.MakeFeatureLayer_management(zoneShp,"tempLayer") 151 | arcpy.AddJoin_management("tempLayer",ZoneField,"ZonSAT.dbf",ZoneField,JoinType) 152 | arcpy.CopyFeatures_management("tempLayer",curFileName) 153 | curFileNameShp = curFileName+".shp" 154 | arcpy.Delete_management("ZonSAT.dbf") 155 | except: 156 | arcpy.gp.GetMessages() 157 | try: 158 | AddCalDelField(curFileNameShp,curFileName,"ZonSAT_"+SummarizeVal[0:3],"DOUBLE") 159 | arcpy.DeleteField_management(curFileNameShp,DropFields) 160 | for field in ListFields(curFileNameShp): 161 | #print field 162 | if not(field[0]=="FID" or field[0]=="Shape" or field[0]==curFileName): 163 | if field[0][2:] == "GRIDCODE": 164 | AddCalDelField(curFileNameShp,field[0][2:],field[0],"INTEGER") 165 | else: 166 | AddCalDelField(curFileNameShp,field[0][2:],field[0],field[1]) 167 | #print "ZonSAT_"+SummarizeVal[0:3] 168 | arcpy.Delete_management(zoneShp) 169 | arcpy.Copy_management(curFileNameShp,"p.shp") 170 | arcpy.Delete_management(curFileNameShp) 171 | except: 172 | arcpy.gp.GetMessages() 173 | arcpy.Copy_management("p.shp",outFolder+"\\"+outFileName+".shp") 174 | arcpy.Delete_management("p.shp") 175 | print " Saving the Attribute table to CSV file..." 
176 | SaveShpAsCSV(outFolder+"\\"+outFileName+".shp",outFolder,outFileName) 177 | 178 | if __name__ == '__main__': 179 | setupMultiVal2Poly() 180 | raw_input() 181 | exit() -------------------------------------------------------------------------------- /ExtractMultiValue2Zones/ExtractMultiValue2Zones.tbx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/ExtractMultiValue2Zones/ExtractMultiValue2Zones.tbx -------------------------------------------------------------------------------- /ExtractRasterByMultiPolygon/.idea/ExtractRasterByMultiPolygon.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /ExtractRasterByMultiPolygon/.idea/encodings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /ExtractRasterByMultiPolygon/.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | C:\Users\ZhuLJ\AppData\Roaming\Subversion 16 | 17 | 18 | 19 | 20 | 21 | 22 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /ExtractRasterByMultiPolygon/.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /ExtractRasterByMultiPolygon/.idea/workspace.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 10 | 11 | 12 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 1453554611822 32 | 35 | 36 | 37 | 38 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /ExtractRasterByMultiPolygon/ExtractRasterByMultiPolygon.py: -------------------------------------------------------------------------------- 1 | 2 | # coding=utf-8 3 | # Function : Extract Raster By Mask of MultPolygon Shapefile. 4 | # Created By: Liangjun Zhu 5 | # Date : 1/23/16 6 | # Email : zlj@lreis.ac.cn 7 | # 8 | import os 9 | import arcpy 10 | from arcpy import env 11 | 12 | def ListFieldValues(fileLayer, fName): 13 | fields = arcpy.gp.listFields(fileLayer) 14 | fieldValues = [] 15 | flag = False 16 | for field in fields: 17 | if str(field.name) == fName: 18 | flag = True 19 | if flag: 20 | rowCursor = arcpy.SearchCursor(fileLayer) 21 | for row in rowCursor: 22 | fieldValues.append(row.getValue(fName)) 23 | return (fieldValues, flag) 24 | def ExtractRasterByMultiPolygon(shpFile, filedName, originRasterFile, bufferSize, suffix, outPath): 25 | ## Set environment settings 26 | if not os.path.isdir(outPath): ## if outPath is not exist, then build it. 
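        # Only attempt to create the output directory when a non-empty path is given.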
27 | if outPath != "": 28 | os.mkdir(outPath) 29 | env.workspace = outPath 30 | ## Split polygon by fieldName 31 | polyNames, flag = ListFieldValues(shpFile, filedName) 32 | if(flag): 33 | arcpy.gp.overwriteOutput = 1 34 | ## Get the cellsize of originRasterFile 35 | cellSizeResult = arcpy.GetRasterProperties_management(originRasterFile, "CELLSIZEX") 36 | cellSize = cellSizeResult.getOutput(0) 37 | bufferDistance = float(cellSize) * bufferSize 38 | arcpy.Split_analysis(shpFile, shpFile, filedName, outPath) 39 | polyFiles = [] 40 | polyBufferFiles = [] 41 | polyFinalFiles = [] 42 | rasterFiles = [] 43 | for name in polyNames: 44 | polyFile = outPath + os.sep + name + '.shp' 45 | polyBufferFile = outPath + os.sep + name + '_buf.shp' 46 | polyFinalFile = outPath + os.sep + name + '_final.shp' 47 | if suffix is None: 48 | rasterFile = outPath + os.sep + name + '.tif' 49 | else: 50 | rasterFile = outPath + os.sep + name + suffix + '.tif' 51 | polyFiles.append(polyFile) 52 | polyBufferFiles.append(polyBufferFile) 53 | rasterFiles.append(rasterFile) 54 | polyFinalFiles.append(polyFinalFile) 55 | arcpy.Buffer_analysis(polyFile, polyBufferFile, bufferDistance, "OUTSIDE_ONLY") 56 | arcpy.Merge_management([polyFile, polyBufferFile], polyFinalFile) 57 | 58 | if arcpy.CheckOutExtension("Spatial") == "CheckedOut": 59 | for i in range(0,len(polyBufferFiles)): 60 | tempRaster = arcpy.sa.ExtractByMask(originRasterFile, polyFinalFiles[i]) 61 | tempRaster.save(rasterFiles[i]) 62 | else: 63 | print "The %s is not exist in %s" % (filedName, shpFile) 64 | return None 65 | 66 | if __name__ == '__main__': 67 | ## input 68 | MultiPolyShp = r'D:\data\GLake\basins.shp' 69 | FieldName = "Code" ## Field used to name raster files 70 | RasterFile = r'D:\data\GLake\glake_id.tif' 71 | #RasterFile = r'D:\data\GLake\srtm_tp.tif' 72 | BufferSize = 20 ## By default, every single polygon will buffer a distance of 10*cellsize 73 | suffix = "_ID" ## If no suffix, set as None 74 | ## output 75 | outDir = r'D:\data\GLake\GLoutput' 76 | ## run 77 | ExtractRasterByMultiPolygon(MultiPolyShp, FieldName, RasterFile, BufferSize, suffix, outDir) 78 | -------------------------------------------------------------------------------- /HydroDataDownload/Down_Fujian_RealTimeData_Shuizhi.py: -------------------------------------------------------------------------------- 1 | 2 | # coding=utf-8 3 | # 4 | # 福建省地表水水质实时信息公开系统(试运行) 5 | # https://szfb.fjeec.cn:444/AutoData/Business/DataPublish_FJ/index.html 6 | # 7 | # 8 | # 9 | # Created by Liangjun Zhu (zlj@lreis.ac.cn) 10 | # Updated: 08/17/2020 11 | # 06/30/2021 Add verify=False to request.get() function 12 | from __future__ import unicode_literals 13 | 14 | import os 15 | import json 16 | import datetime 17 | from io import open 18 | import requests 19 | from requests.exceptions import RequestException 20 | 21 | from apscheduler.schedulers.blocking import BlockingScheduler 22 | from pygeoc.utils import UtilClass 23 | 24 | REAL_URL = 'https://szfb.fjeec.cn:444/API/PublicService/ShuiZhiFaBu/GetRealData?AreaID=&RiverID=' 25 | REQ_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ' 26 | '(KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36', 27 | 'Accept': 'application/json,text/plain,*/*', 28 | 'Content-Type': 'application/json;charset=utf8', 29 | 'Authorization': 'Public_Web=6A607FAB00686B7B363BD9A81B835649'} 30 | 31 | 32 | def get_realtime_data(): 33 | try: 34 | response = requests.get(REAL_URL, headers=REQ_HEADERS, verify=False) 35 | if response.status_code 
== 200: 36 | tmpstr = response.text 37 | tmpstr = tmpstr.replace('\r\n', '') 38 | tmpstr = tmpstr.replace('\n', '') 39 | tmpstr = tmpstr.replace('\r', '') 40 | return tmpstr 41 | return None 42 | except RequestException as excpt: 43 | print(excpt) 44 | print('Get data failed from %s' % REAL_URL) 45 | 46 | 47 | def down_routinely(savedir): 48 | """Write response string to log file and Parsed JSON to YYYY-MM-DD-HH.json file.""" 49 | ctime = datetime.datetime.now() 50 | ctime_str = ctime.strftime('%Y-%m-%d %H:%M:%S') 51 | print('Executed at %s' % ctime_str) 52 | 53 | dstring = get_realtime_data() 54 | with open(savedir + os.sep + 'FJ_realdata_shuizhi.data', 'a', encoding='utf-8') as logf: 55 | if dstring is None: 56 | logf.write('[%s] Get data failed!\n' % ctime_str) 57 | else: 58 | logf.write('[%s] %s\n' % (ctime_str, dstring)) 59 | if dstring is None: 60 | return 61 | 62 | djson = json.loads(dstring) 63 | if 'ResultList' not in djson: 64 | return 65 | if len(djson['ResultList']) < 1: 66 | return 67 | if 'DataTime' not in djson['ResultList'][0]: 68 | return 69 | data_time_str = djson['ResultList'][0]['DataTime'] 70 | data_time = datetime.datetime.strptime(data_time_str, '%Y/%m/%d %H:%M') 71 | 72 | rawdir = wp + os.sep + 'raw_data' 73 | UtilClass.mkdir(rawdir) 74 | json_name = '%s.json' % (data_time.strftime('%Y-%m-%d-%H')) 75 | json_file = rawdir + os.sep + json_name 76 | 77 | if os.path.exists(json_file): 78 | with open(savedir + os.sep + 'FJ_realdata_shuizhi.log', 'a', encoding='utf-8') as logf: 79 | logf.write('[%s] %s already exist.\n' % (ctime_str, json_name)) 80 | else: 81 | with open(json_file, 'w', encoding='utf-8') as jf: 82 | jf.write(json.dumps(djson, indent=4, ensure_ascii=False)) 83 | with open(savedir + os.sep + 'FJ_realdata_shuizhi.log', 'a', encoding='utf-8') as logf: 84 | logf.write('[%s] %s saved successfully.\n' % (ctime_str, json_name)) 85 | 86 | 87 | if __name__ == "__main__": 88 | wp = UtilClass.current_path(lambda: 0) 89 | # wp = 'D:\\tmp\\fujian_shuizhi_realtime' 90 | UtilClass.mkdir(wp) 91 | 92 | # down_routinely(wp) 93 | 94 | sched = BlockingScheduler() 95 | sched.add_job(down_routinely, args=[wp], trigger='interval', seconds=10800) 96 | sched.start() 97 | -------------------------------------------------------------------------------- /HydroDataDownload/GetYcRainSum_20150805-zhulm.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import urllib2,httplib,string,sys,time 4 | from xml.etree import ElementTree as xmlTree 5 | 6 | #--------获取页面,保存为XML-------------------- 7 | def GetYcRainSum(sTime, eTime, stcd, tDivide): 8 | params = \ 9 | ''' 10 | 11 | 12 | 13 | %s 14 | %s 15 | %s 16 | %s 17 | 18 | 19 | ''' 20 | SoapMessage = params % (sTime, eTime, stcd, tDivide) 21 | 22 | def getXML(sTime, eTime, stcd, tDivide): 23 | try: 24 | #-------------这个页面打不开---------- 25 | conn = httplib.HTTP("yc.wswj.net") 26 | #-------------这个页面打不开---------- 27 | # request是自动发送header,putrequest要手动发送header(两者之间的区别) 28 | conn.putrequest("POST","/ahyc/web_rain/Service.asmx") 29 | 30 | conn.putheader("Accept","*/*") 31 | conn.putheader("Accept-Encoding","gzip,deflate,sdch") 32 | conn.putheader("Accept-Language","zh-CN,zh;q=0.8,en;q=0.6") 33 | conn.putheader("Host","yc.wswj.net") 34 | conn.putheader("Origin","http://yc.wswj.net") 35 | #conn.putheader("Connection","keep-alive"); 36 | #-------这个页面可以打开,是降水分布的网址--------这个页面含有降水数据,不知道如何查看页面具体内容-------------- 37 | conn.putheader("Referer","http://yc.wswj.net/ahyc/Main2.swf") 38 | 39 | 
conn.putheader("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36"); 40 | conn.putheader("Content-Type","text/xml; charset=utf-8") 41 | conn.putheader("Content-Length", "%d" % len(SoapMessage)) 42 | conn.putheader("SOAPAction","http://tempuri.org/GetYcRainSum") 43 | conn.putheader("Cookie","ASP.NET_SessionId=suj53q55f3qxjp55anbw0sjg; CNZZDATA3906820=cnzz_eid%3D1809096373-1405232367-http%253A%252F%252Fhfswj.net%252F%26ntime%3D1405302063") 44 | conn.endheaders() 45 | conn.send(SoapMessage) 46 | statuscode, statusmessage, header = conn.getreply() 47 | print "Response: ",statuscode, statusmessage 48 | if statuscode == 200: 49 | #print "Headers: ", header 50 | Res = conn.getfile().read() 51 | #print str(Res).decode('utf-8') 52 | return Res 53 | 54 | except: 55 | time.sleep(20) 56 | return getXML(sTime, eTime, stcd, tDivide) 57 | return getXML(sTime, eTime, stcd, tDivide) 58 | 59 | 60 | #---------------将获取的XML站点转换为“CSV”格式----------------- 61 | def SaveXML2Csv(Res, findName, savePath, year): 62 | tree = xmlTree.fromstring(Res) 63 | nodes = tree.findall(findName) 64 | if not nodes: 65 | return 0 66 | else: 67 | f=open(savePath,"a") 68 | 69 | for node in nodes: 70 | itemline = "" 71 | month = str(node[0].text.encode('utf-8'))[0:2] 72 | day = str(node[0].text.encode('utf-8'))[5:7] 73 | HH = str(node[0].text.encode('utf-8'))[10:12] 74 | itemline+=str(year)+"/"+month+"/"+day+" "+HH+":00"+","+str(node[1].text.encode('utf-8'))+"\n" 75 | #print itemline 76 | 77 | f.write(itemline) 78 | f.close() 79 | return 1 80 | 81 | #--------------主函数入口------------------------ 82 | if __name__ == '__main__': 83 | print "Beigin to download YcRainSum data!" 84 | 85 | 86 | f=open(r"D:\WorkSpace\Download_RainData\Zhanhao.txt","r") 87 | 88 | 89 | ZhanHaos = [] 90 | for eachSite in f: 91 | ZhanHaos.append(eachSite.split('\n')[0]) 92 | f.close() 93 | #print len(ZhanHaos) 94 | print ZhanHaos 95 | 96 | def downData(start, end, ZhanHao, year): 97 | #------------开始下载页面----------------- 98 | xmlText = GetYcRainSum(start, end, ZhanHao, "60") 99 | savename=ZhanHao+ '-' +year 100 | 101 | 102 | savePath = r'D:\WorkSpace\Download_RainData\2011\%s.txt' % savename 103 | 104 | 105 | #------------调用前面函数,进行格式转换-------------- 106 | success = SaveXML2Csv(xmlText, ".//GetRainValue", savePath, year) 107 | #ZhanHaos = ['62903180','62942737','62942707','62915310','62933800','62942747','62922800','62942717','62942757'] 108 | 109 | #-----------降水数据从2007年开始才有记录-------------------- 110 | #years = ['2007','2008','2009','2010','2011','2012','2013','2014','2015'] 111 | years = ['2013'] 112 | #years = ['2015'] 113 | #months = ['01','02','03','04','05','06','07','08','09','10','11','12'] 114 | #downData('2013-12-01 00:00','2013-12-31 00:00', '62903180') 115 | 116 | for ZhanHao in ZhanHaos: 117 | for year in years: 118 | print "Downloading "+str(ZhanHao)+"'s data in "+str(year)+" ..." 119 | sTime = str(year)+'-01-01 00:00' 120 | eTime = str(year)+'-12-31 23:00' 121 | print ZhanHao,sTime,eTime 122 | downData(sTime,eTime, ZhanHao, year) 123 | print "Download "+str(ZhanHao)+"'s data successfully!" 124 | print "Download Succeed!" 125 | 126 | 127 | 128 | 129 | -------------------------------------------------------------------------------- /HydroDataDownload/ReadDatabase_SURF_CLI_CHN_MUL_DAY.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Func. 
: Read Database of SURF_CLI_CHN_MUL_DAY_V3.0 3 | # Author: Liangjun Zhu 4 | # Date : 2016-4-11 5 | # Email : zlj@lreis.ac.cn 6 | # Blog : http://zhulj.net/python/2016/04/11/Constructing-SURF_CLI_CHN_MUL_DAY_V3.0-database.html 7 | 8 | import datetime 9 | import os 10 | import sqlite3 11 | 12 | 13 | def get_conn(path): 14 | """ 15 | get connection of Sqlite 16 | :param path: path of Sqlite database 17 | """ 18 | conn = sqlite3.connect(path) 19 | if os.path.exists(path) and os.path.isfile(path): 20 | # print('database in hardware :[{}]'.format(path)) 21 | return conn 22 | else: 23 | conn = None 24 | # print('database in memory :[:memory:]') 25 | return sqlite3.connect(':memory:') 26 | 27 | 28 | def get_cursor(conn): 29 | """ 30 | get cursor of current connection 31 | :param conn: connection of Sqlite 32 | """ 33 | if conn is not None: 34 | return conn.cursor() 35 | else: 36 | return get_conn('').cursor() 37 | 38 | 39 | def close_all(conn, cu): 40 | """ 41 | close connection and cursor of Sqlite 42 | :param conn: connection of Sqlite 43 | :param cu: cursor of conn 44 | """ 45 | try: 46 | if cu is not None: 47 | cu.close() 48 | finally: 49 | if cu is not None: 50 | cu.close() 51 | 52 | 53 | def getTablesList(dbpath): 54 | """ 55 | Get all tables' name in Sqlite database 56 | :param dbpath: 57 | :return: table names 58 | """ 59 | conn = sqlite3.connect(dbpath) 60 | cu = get_cursor(conn) 61 | tabs = cu.execute( 62 | "select name from sqlite_master where type = 'table' order by name").fetchall() 63 | tabList = list() 64 | for tab in tabs: 65 | if len(tab[0]) == 6: 66 | tabList.append(tab[0]) 67 | close_all(conn, cu) 68 | return tabList 69 | 70 | 71 | def fetchData(conn, sql): 72 | """ 73 | Query data by sql 74 | :param conn: 75 | :param sql: 76 | :return: data queried 77 | """ 78 | data = list() 79 | if sql is not None and sql != '': 80 | cu = get_cursor(conn) 81 | cu.execute(sql) 82 | r = cu.fetchall() 83 | if len(r) > 0: 84 | for e in range(len(r)): 85 | # print(r[e]) 86 | data.append(r[e]) 87 | else: 88 | print('the [{}] is empty or equal None!'.format(sql)) 89 | return data 90 | 91 | 92 | def saveToCSV(data, csvPath, flag='climData', fields=None): 93 | f = open(csvPath, "w") 94 | title = '' 95 | if flag == 'climData': 96 | if fields is None: 97 | title = 'stationID,datetimeBJ,avgPRS,maxPRS,minPRS,avgTEM,maxTEM,minTEM,' \ 98 | 'avgRHU,minRHU,PRE208,PRE820,PRE,smEVP,lgEVP,avgWIN,maxWIN,maxWINASP,' \ 99 | 'extWIN,extWINASP,SSD,avgGST,maxGST,minGST\n' 100 | else: 101 | title = ','.join(fields) 102 | title += '\n' 103 | elif flag == 'stationInfo': 104 | title = 'stationID,lat,lon,alti\n' 105 | f.write(title) 106 | for items in data: 107 | itemsStr = '' 108 | if flag == 'stationInfo': 109 | items = items[0] 110 | for item in items: 111 | itemsStr += str(item) 112 | itemsStr += ',' 113 | itemsStr = itemsStr[:-1] 114 | itemsStr += '\n' 115 | f.write(itemsStr) 116 | f.close() 117 | 118 | 119 | def isNum(value): 120 | try: 121 | x = int(value) 122 | except TypeError: 123 | return False 124 | except ValueError: 125 | return False 126 | except Exception: 127 | return False 128 | else: 129 | return True 130 | 131 | 132 | def QueryDatabase(dbpath, savePath, stationIDs, startTime, endTime, fields=None): 133 | """ 134 | Query and save data from Sqlite database 135 | :param dbpath: 136 | :param savePath: 137 | :param stationIDs: 138 | :param startTime: 139 | :param endTime: 140 | :param newfields: List of selected fields, None means all fields. 
141 | :return: 142 | """ 143 | tableList = getTablesList(dbpath) 144 | conn = sqlite3.connect(dbpath) 145 | if not os.path.isdir(savePath): 146 | os.mkdir(savePath) 147 | stationInfoCSVPath = savePath + os.sep + 'stationInfo.csv' 148 | stationInfoData = list() 149 | if stationIDs == list(): 150 | stationIDs = getTablesList(dbpath) 151 | else: 152 | for i in range(len(stationIDs)): 153 | if isNum(stationIDs[i]): 154 | stationIDs[i] = 'S' + str(stationIDs[i]) 155 | else: 156 | stationIDs[i] = 'S' + stationIDs[i] 157 | if fields is None: 158 | fields = ['stID', 'date', 'avgPRS', 'maxPRS', 'minPRS', 'avgTEM', 'maxTEM', 'minTEM', 159 | 'avgRHU', 'minRHU', 'PRE208', 'PRE820', 'PRE', 'smEVP', 'lgEVP', 'avgWIN', 160 | 'maxWIN', 'maxWINASP', 'extWIN', 'extWINASP', 'SSD', 'avgGST', 'maxGST', 'minGST'] 161 | else: 162 | fields.insert(0, 'date') 163 | fields.insert(0, 'stID') 164 | selects = ','.join(fields) 165 | for tabName in stationIDs: 166 | # tabName = 'S' + stationID 167 | stationID = tabName[1:] 168 | if tabName in tableList: 169 | csvPath = savePath + os.sep + tabName + '.csv' 170 | startT = datetime.datetime(startTime[0], startTime[1], startTime[2]) 171 | endT = datetime.datetime(endTime[0], endTime[1], endTime[2]) 172 | endT += datetime.timedelta(days=1) 173 | startTStr = startT.strftime("%Y-%m-%d %H:%M:%S")[:10] 174 | endTStr = endT.strftime("%Y-%m-%d %H:%M:%S")[:10] 175 | fetch_data_sql = '''SELECT %s FROM %s WHERE date BETWEEN "%s" AND 176 | "%s" ORDER BY date''' % (selects, tabName, startTStr, endTStr) 177 | # print(fetch_data_sql) 178 | data = fetchData(conn, fetch_data_sql) 179 | saveToCSV(data, csvPath, fields=fields) 180 | fetch_station_sql = '''SELECT * FROM stationInfo WHERE stID=%s ''' % stationID 181 | stationInfoData.append(fetchData(conn, fetch_station_sql)) 182 | saveToCSV(stationInfoData, stationInfoCSVPath, 'stationInfo') 183 | conn.close() 184 | 185 | 186 | if __name__ == '__main__': 187 | # Input parameters 188 | SQLITE_DB_PATH = r'D:\data\common_GIS_Data\SURF_CLI_CHN_MUL_DAY_V3.0\SURF_CLI_CHN_MUL_DAY_V3-201712.db' 189 | QUERY_STATION_IDs = [58911] 190 | QUERY_DATE_FROM = [1950, 1, 1] # format: Year, Month, Day 191 | QUERY_DATE_END = [2017, 12, 31] 192 | # Available fields: 193 | # avgPRS,maxPRS,minPRS,avgTEM,maxTEM,minTEM, avgRHU,minRHU, 194 | # PRE208,PRE820,PRE,smEVP,lgEVP,avgWIN,maxWIN,maxWINASP, 195 | # extWIN,extWINASP,SSD,avgGST,maxGST,minGST 196 | SELECTED_FIELDS = None 197 | 198 | SAVE_PATH = r'D:\tmp' 199 | 200 | QueryDatabase(SQLITE_DB_PATH, SAVE_PATH, QUERY_STATION_IDs, QUERY_DATE_FROM, QUERY_DATE_END, 201 | SELECTED_FIELDS) 202 | -------------------------------------------------------------------------------- /HydroDataDownload/anhui_precipitation_download.py: -------------------------------------------------------------------------------- 1 | 2 | # coding=utf-8 3 | # 4 | # Author: Liang-Jun Zhu 5 | # Email: zlj@lreis.ac.cn 6 | # 7 | 8 | import httplib, time 9 | from xml.etree import ElementTree as xmlTree 10 | 11 | 12 | def GetYcRainSum(sTime, eTime, stcd, tDivide): 13 | params = \ 14 | ''' 15 | 16 | 17 | 18 | %s 19 | %s 20 | %s 21 | %s 22 | 23 | 24 | ''' 25 | SoapMessage = params % (stcd, sTime, eTime, tDivide) 26 | 27 | def getXML(sTime, eTime, stcd, tDivide): 28 | try: 29 | conn = httplib.HTTP("yc.wswj.net") 30 | conn.putrequest("POST", "/ahyc/web_rain/Service.asmx") 31 | conn.putheader("Accept", "*/*") 32 | conn.putheader("Accept-Encoding", "gzip,deflate") 33 | conn.putheader("Accept-Language", "zh-CN,zh;q=0.8,en;q=0.6") 34 | conn.putheader("Host", 
"yc.wswj.net") 35 | conn.putheader("Origin", "http://yc.wswj.net") 36 | # conn.putheader("Connection","keep-alive") 37 | conn.putheader("Referer", "http://yc.wswj.net/ahyc/Main73.swf") 38 | conn.putheader("User-Agent", 39 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36") 40 | conn.putheader("Content-Type", "text/xml; charset=utf-8") 41 | conn.putheader("Content-Length", "%d" % len(SoapMessage)) 42 | conn.putheader("SOAPAction", "http://tempuri.org/getYlZzt") 43 | conn.putheader("Cookie", 44 | "td_cookie=18446744071625666010; CNZZDATA3906820=cnzz_eid%3D1560662269-1465952761-http%253A%252F%252Fyc.wswj.net%252F%26ntime%3D1465952761;ASP.NET_SessionId=nurmvus304xbfjofntol04jf") 45 | conn.endheaders() 46 | conn.send(SoapMessage) 47 | # conn.set_debuglevel(1) 48 | statuscode, statusmessage, header = conn.getreply() 49 | #print "Response: ", statuscode, statusmessage 50 | if statuscode == 200: 51 | #print "Headers: ", header 52 | Res = conn.getfile().read() 53 | #print str(Res).decode('utf-8') 54 | return Res 55 | ## else: 56 | ## time.sleep(20) 57 | ## return getXML(sTime, eTime, stcd, tDivide) 58 | except: 59 | time.sleep(20) 60 | return getXML(sTime, eTime, stcd, tDivide) 61 | 62 | return getXML(sTime, eTime, stcd, tDivide) 63 | 64 | 65 | def SaveXML2Csv(Res, findName, savePath, year): 66 | tree = xmlTree.fromstring(Res) 67 | nodes = tree.findall(findName) 68 | if not nodes: 69 | return 0 70 | else: 71 | f = open(savePath, "a") 72 | ## titleline = "" 73 | ## for Title in nodes[0]: 74 | ## titleline += str(Title.tag.encode('utf-8'))+"," 75 | ## titleline+="\n" 76 | ## f.write(titleline) 77 | for node in nodes: 78 | itemline = "" 79 | ### previous code, deprecated 80 | # month = str(node[0].text.encode('utf-8'))[0:2] 81 | # day = str(node[0].text.encode('utf-8'))[5:7] 82 | # HH = str(node[0].text.encode('utf-8'))[10:12] 83 | ### updated by LJ, 2016-6-15 84 | timeString = node[0].text 85 | valueString = node[1].text 86 | yyyy = timeString[0:4] 87 | mm = timeString[5:7] 88 | dd = timeString[8:10] 89 | HHMM = timeString[11:16] 90 | itemline += yyyy + "/" + mm + "/" + dd + " " + HHMM + "," + valueString + "\n" 91 | # print itemline 92 | f.write(itemline) 93 | f.close() 94 | return 1 95 | 96 | 97 | if __name__ == '__main__': 98 | print ("Beigin to download Anhui Rainfall data!") 99 | 100 | ## newSiteLines = [] 101 | ## for i in range(len(ZhanHao)): 102 | ## xmlText = GetYcRainSum(ZhanHao[i], "2014-07-01 08:00", "2014-07-03 08:00", "1440") 103 | ## #print str(xmlText).decode('utf-8') 104 | ## savePath = r'E:\RainfallData_Anhui\YlZzt\%s.csv' % ZhanMing[i] 105 | ## #print savePath 106 | ## if SaveXML2Csv(xmlText, ".//YLZZT", savePath): 107 | ## newSiteLine = ZhanHao[i]+","+ZhanMing[i] 108 | ## newSiteLines.append(newSiteLine) 109 | ## print i,newSiteLine 110 | ## print len(newSiteLines) 111 | ## f=open(r"e:\NewRainfallSites.txt","w") 112 | ## for line in newSiteLines: 113 | ## f.write(line) 114 | ## f.close() 115 | 116 | f=open(r"C:\z_data\zhongTianShe\climate\pcp_download_ahyc\ZhanHao_zhongtianshe.txt","r") 117 | ZhanHaos = [] 118 | for eachSite in f: 119 | ZhanHaos.append(eachSite.split('\n')[0]) 120 | f.close() 121 | print len(ZhanHaos) 122 | print ZhanHaos 123 | # ZhanHaos = ['62903180', '62942837', '62902700'] 124 | 125 | 126 | def downData(start, end, ZhanHao, year): 127 | xmlText = GetYcRainSum(start, end, ZhanHao, "1440") 128 | savePath = r'C:\z_data\zhongTianShe\climate\pcp_download_ahyc\daily\%s-%s.txt' % (ZhanHao, str(year)) 
129 | success = SaveXML2Csv(xmlText, ".//data", savePath, year) 130 | # print success 131 | 132 | 133 | # ZhanHaos = ['62903180','62942737','62942707','62915310','62933800','62942747','62922800','62942717','62942757'] 134 | # years = ['2005','2006','2007','2008','2009','2010','2011','2012','2013','2014'] 135 | years = ['2011','2012', '2013'] 136 | # months = ['01','02','03','04','05','06','07','08','09','10','11','12'] 137 | # downData('2013-12-01 00:00','2013-12-31 00:00', '62903180') 138 | 139 | for ZhanHao in ZhanHaos: 140 | for year in years: 141 | print "Downloading " + str(ZhanHao) + "'s data in " + str(year) + " ..." 142 | sTime = str(year) + '-01-01 00:00' 143 | eTime = str(year) + '-12-31 23:00' 144 | print ZhanHao, sTime, eTime 145 | downData(sTime, eTime, ZhanHao, year) 146 | print "Download " + str(ZhanHao) + "'s data successfully!" 147 | 148 | print "Download Succeed!" 149 | -------------------------------------------------------------------------------- /HydroDataDownload/climate_download.py: -------------------------------------------------------------------------------- 1 | 2 | # coding=utf-8 3 | # Author: Liangjun Zhu 4 | # Date : 2016-4-7 5 | # Email : zlj@lreis.ac.cn 6 | # Blog : zhulj.net 7 | 8 | import urllib2 9 | import os 10 | import sys 11 | import time 12 | 13 | 14 | def currentPath(): 15 | path = sys.path[0] 16 | if os.path.isdir(path): 17 | return path 18 | elif os.path.isfile(path): 19 | return os.path.dirname(path) 20 | 21 | 22 | def mkdir(dir): 23 | if not os.path.isdir(dir): 24 | os.mkdir(dir) 25 | 26 | 27 | def downloadByUrl(curUrl, filePath): 28 | f = urllib2.urlopen(curUrl) 29 | data = f.read() 30 | with open(filePath, "wb") as code: 31 | code.write(data) 32 | 33 | 34 | def findUrlTxts(path): 35 | tempFiles = os.listdir(path) 36 | urlTxts = [] 37 | for s in tempFiles: 38 | if s.split(".")[-1] == 'txt': 39 | urlTxts.append(path + os.sep + s) 40 | return urlTxts 41 | 42 | 43 | def ReadUrls(files): 44 | urls = [] 45 | for file in files: 46 | curF = open(file) 47 | for line in curF: 48 | urls.append(line) 49 | curF.close() 50 | return urls 51 | 52 | 53 | def findStations(urls): 54 | stations = [] 55 | for curUrl in urls: 56 | temp = curUrl.split("?")[0] 57 | fileName = temp.split("/")[-1] 58 | sss = fileName.split('-') 59 | for ss in sss: 60 | if len(ss) == 5 and not ss in stations: 61 | stations.append(ss) 62 | return stations 63 | 64 | 65 | def isStationNeeded(name): 66 | temp = name.split('-') 67 | flag = False 68 | for s in temp: 69 | if len(s) == 5: 70 | flag = True 71 | break 72 | return flag 73 | 74 | 75 | def climateDown(urls, savePath, eachNum = 200, timeout = 5): 76 | count = 1 77 | allcount = len(urls) 78 | for curUrl in urls: 79 | temp = curUrl.split("?")[0] 80 | saveName = temp.split("/")[-1] 81 | if isStationNeeded(saveName): 82 | curSavePath = savePath + os.sep + saveName 83 | if count % eachNum == 0: 84 | time.sleep(timeout) 85 | downloadByUrl(curUrl, curSavePath) 86 | print " %d / %d, %s" % (count, allcount, saveName) 87 | count += 1 88 | 89 | 90 | if __name__ == '__main__': 91 | CUR_PATH = currentPath() 92 | CUR_PATH = r'C:\Users\ZhuLJ\Desktop\climate_data_download' 93 | DOWN_PATH = CUR_PATH + os.sep + 'download' 94 | mkdir(DOWN_PATH) 95 | urlTxts = findUrlTxts(CUR_PATH) 96 | urls = ReadUrls(urlTxts) 97 | climateDown(urls, DOWN_PATH, 200, 5) 98 | -------------------------------------------------------------------------------- /HydroDataDownload/netcdf4_pydap_test.py: 
-------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import netcdf4_pydap 4 | 5 | credentials={'username': 'zhuliangjun', 6 | 'password': 'Liangjun0130', 7 | 'authentication_url': 'https://urs.earthdata.nasa.gov/'} 8 | url = ('http://disc2.gesdisc.eosdis.nasa.gov/data//TRMM_L3/' 9 | 'TRMM_3B42_Daily.7/2016/10/3B42_Daily.20161019.7.nc4') 10 | 11 | with netcdf4_pydap.Dataset(url, **credentials) as dataset: 12 | data = dataset.variables['SLP'][0,:,:] 13 | plt.contourf(np.squeeze(data)) 14 | plt.show() -------------------------------------------------------------------------------- /HydroDataDownload/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | from cookielib import CookieJar 3 | from urllib import urlencode 4 | 5 | import urllib2 6 | 7 | # The user credentials that will be used to authenticate access to the data 8 | 9 | username = "zhuliangjun" 10 | password = "Liangjun0130" 11 | 12 | # The url of the file we wish to retrieve 13 | 14 | url = "http://disc2.gesdisc.eosdis.nasa.gov/data//TRMM_L3/TRMM_3B42_Daily.7/" \ 15 | "2016/10/3B42_Daily.20161019.7.nc4" 16 | 17 | # Create a password manager to deal with the 401 reponse that is returned from 18 | # Earthdata Login 19 | 20 | password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() 21 | password_manager.add_password(None, "https://urs.earthdata.nasa.gov", username, password) 22 | 23 | # Create a cookie jar for storing cookies. This is used to store and return 24 | # the session cookie given to use by the data server (otherwise it will just 25 | # keep sending us back to Earthdata Login to authenticate). Ideally, we 26 | # should use a file based cookie jar to preserve cookies between runs. This 27 | # will make it much more efficient. 28 | 29 | cookie_jar = CookieJar() 30 | 31 | # Install all the handlers. 32 | 33 | opener = urllib2.build_opener( 34 | urllib2.HTTPBasicAuthHandler(password_manager), 35 | # urllib2.HTTPHandler(debuglevel=1), # Uncomment these two lines to see 36 | # urllib2.HTTPSHandler(debuglevel=1), # details of the requests/responses 37 | urllib2.HTTPCookieProcessor(cookie_jar)) 38 | urllib2.install_opener(opener) 39 | 40 | # Create and submit the request. There are a wide range of exceptions that 41 | # can be thrown here, including HTTPError and URLError. These should be 42 | # caught and handled. 43 | 44 | request = urllib2.Request(url) 45 | response = urllib2.urlopen(request) 46 | 47 | # Print out the result (not a good idea with binary data!) 
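# (Clarifying note, not part of the original script.) The .nc4 file returned by
# this URL is binary, so printing it is mostly useful as a quick connectivity check;
# in practice it is better to write the response to disk instead, e.g. (hypothetical
# output name taken from the last part of the URL):
#     with open('3B42_Daily.20161019.7.nc4', 'wb') as fout:
#         fout.write(response.read())
# trmm_download.py in this folder does exactly that in downNASAEarthdata().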
48 | 49 | body = response.read() 50 | print body 51 | -------------------------------------------------------------------------------- /HydroDataDownload/trmm_download.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Author: Liangjun Zhu 3 | # Date : 2016-4-7 4 | # Email : zlj@lreis.ac.cn 5 | # Blog : zhulj.net 6 | 7 | import urllib2 8 | import os 9 | import sys 10 | import time 11 | 12 | 13 | def currentPath(): 14 | path = sys.path[0] 15 | if os.path.isdir(path): 16 | return path 17 | elif os.path.isfile(path): 18 | return os.path.dirname(path) 19 | 20 | 21 | def mkdir(dir): 22 | if not os.path.isdir(dir): 23 | os.mkdir(dir) 24 | 25 | 26 | def downloadByUrl(curUrl, filePath): 27 | f = urllib2.urlopen(curUrl) 28 | data = f.read() 29 | with open(filePath, "wb") as code: 30 | code.write(data) 31 | 32 | 33 | def findUrlTxts(path): 34 | tempFiles = os.listdir(path) 35 | urlTxts = [] 36 | for s in tempFiles: 37 | if s.split(".")[-1] == 'txt': 38 | urlTxts.append(path + os.sep + s) 39 | return urlTxts 40 | 41 | 42 | def ReadUrls(files): 43 | urls = [] 44 | for file in files: 45 | curF = open(file) 46 | for line in curF: 47 | line = line.split('\n')[0] 48 | urls.append(line) 49 | curF.close() 50 | return urls 51 | 52 | 53 | def findStations(urls): 54 | stations = [] 55 | for curUrl in urls: 56 | temp = curUrl.split("?")[0] 57 | fileName = temp.split("/")[-1] 58 | sss = fileName.split('-') 59 | for ss in sss: 60 | if len(ss) == 5 and not ss in stations: 61 | stations.append(ss) 62 | return stations 63 | 64 | def climateDown(urls, savePath, usrname = '', pwd = '', eachNum = 200, timeout = 5): 65 | count = 1 66 | allcount = len(urls) 67 | for curUrl in urls: 68 | saveName = curUrl.split("/")[-1] 69 | curSavePath = savePath + os.sep + saveName 70 | if count % eachNum == 0: 71 | time.sleep(timeout) 72 | if usrname != '' and pwd != '': 73 | downNASAEarthdata(curUrl, curSavePath, usrname, pwd) 74 | else: 75 | downloadByUrl(curUrl, curSavePath) 76 | print " %d / %d, %s" % (count, allcount, saveName) 77 | count += 1 78 | 79 | def downNASAEarthdata(curUrl, curSavePath, usrname, pwd): 80 | from cookielib import CookieJar 81 | from urllib import urlencode 82 | import urllib2 83 | 84 | # The user credentials that will be used to authenticate access to the data 85 | # 86 | # username = "zhuliangjun" 87 | # password = "Liangjun0130" 88 | # 89 | # # The url of the file we wish to retrieve 90 | # 91 | # url = "http://disc2.gesdisc.eosdis.nasa.gov/data//TRMM_L3/TRMM_3B42_Daily.7/" \ 92 | # "2016/10/3B42_Daily.20161019.7.nc4" 93 | 94 | # Create a password manager to deal with the 401 reponse that is returned from 95 | # Earthdata Login 96 | 97 | password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() 98 | password_manager.add_password(None, "https://urs.earthdata.nasa.gov", usrname, pwd) 99 | 100 | # Create a cookie jar for storing cookies. This is used to store and return 101 | # the session cookie given to use by the data server (otherwise it will just 102 | # keep sending us back to Earthdata Login to authenticate). Ideally, we 103 | # should use a file based cookie jar to preserve cookies between runs. This 104 | # will make it much more efficient. 105 | 106 | cookie_jar = CookieJar() 107 | 108 | # Install all the handlers. 
109 | 110 | opener = urllib2.build_opener( 111 | urllib2.HTTPBasicAuthHandler(password_manager), 112 | # urllib2.HTTPHandler(debuglevel=1), # Uncomment these two lines to see 113 | # urllib2.HTTPSHandler(debuglevel=1), # details of the requests/responses 114 | urllib2.HTTPCookieProcessor(cookie_jar)) 115 | urllib2.install_opener(opener) 116 | 117 | # Create and submit the request. There are a wide range of exceptions that 118 | # can be thrown here, including HTTPError and URLError. These should be 119 | # caught and handled. 120 | 121 | request = urllib2.Request(curUrl) 122 | response = urllib2.urlopen(request) 123 | 124 | # Print out the result (not a good idea with binary data!) 125 | 126 | data = response.read() 127 | with open(curSavePath, "wb") as code: 128 | code.write(data) 129 | 130 | if __name__ == '__main__': 131 | CUR_PATH = currentPath() 132 | CUR_PATH = r'C:\Users\ZhuLJ\Desktop\TRMM_download' 133 | usrname = 'zhuliangjun' 134 | pwd = 'Liangjun0130' 135 | DOWN_PATH = CUR_PATH + os.sep + 'download' 136 | mkdir(DOWN_PATH) 137 | urlTxts = findUrlTxts(CUR_PATH) 138 | urls = ReadUrls(urlTxts) 139 | climateDown(urls, DOWN_PATH, usrname = usrname, pwd = pwd) 140 | 141 | -------------------------------------------------------------------------------- /Hydrograph/Hydrograph-Storm.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | #import pylab 3 | import datetime,os,sys 4 | import matplotlib.pyplot as plt 5 | from matplotlib.dates import HourLocator, DateFormatter 6 | import os, time 7 | 8 | def PlotResult(tStart, tEnd, qFile, clr, tOffset=0): 9 | f = open(qFile) 10 | lines = f.readlines() 11 | f.close() 12 | 13 | tlist = [] 14 | qlist = [] 15 | for line in lines: 16 | items = line.split() 17 | date = datetime.datetime.strptime(items[0]+" "+items[1], '%Y-%m-%d %X') 18 | date = datetime.timedelta(minutes=tOffset) + date 19 | #print date 20 | if date < tStart or date > tEnd: 21 | continue 22 | tlist.append(date) 23 | qlist.append(float(items[2])) 24 | p, = plt.plot_date(tlist, qlist, clr,xdate=True, ydate=False, ls='-', marker='o', linewidth=2.0) 25 | return qlist, p 26 | 27 | def PlotPrec(ax, precFile, tStart, tEnd, clr): 28 | f = open(precFile) 29 | lines = f.readlines() 30 | f.close() 31 | tlist = [] 32 | qlist = [] 33 | for line in lines: 34 | items = line.split() 35 | startDate = datetime.datetime.strptime(items[0]+" "+items[1], '%Y-%m-%d %X') 36 | endDate = datetime.datetime.strptime(items[0]+" "+items[2], '%Y-%m-%d %X') 37 | if startDate < tStart or endDate > tEnd: 38 | continue 39 | tlist.append(startDate) 40 | tlist.append(startDate) 41 | tlist.append(endDate) 42 | tlist.append(endDate) 43 | qlist.append(0) 44 | qlist.append(float(items[3])) 45 | qlist.append(float(items[3])) 46 | qlist.append(0) 47 | 48 | p, = ax.plot_date(tlist, qlist, clr,xdate=True, ydate=False) 49 | ax.fill(tlist,qlist,'b') 50 | return qlist, p 51 | 52 | def NashCoef(qObs, qSimu): 53 | n = min(len(qObs), len(qSimu)) 54 | ave = sum(qObs)/n 55 | a1 = 0 56 | a2 = 0 57 | for i in range(n): 58 | a1 = a1 + pow(qObs[i]-qSimu[i], 2) 59 | a2 = a2 + pow(qObs[i] - ave, 2) 60 | return 1 - a1/a2 61 | 62 | def currentPath(): 63 | path = sys.path[0] 64 | if os.path.isdir(path): 65 | return path 66 | elif os.path.isfile(path): 67 | return os.path.dirname(path) 68 | if __name__ == '__main__': 69 | year=1988 70 | tStart = datetime.datetime(year, 8, 7, 19) 71 | tEnd = datetime.datetime(year, 8, 8, 19) 72 | baseFolder = currentPath() 73 | 74 | fig, ax = plt.subplots() 75 
| fig.autofmt_xdate() 76 | #fig.autofmt_xdate() this code should be here, other than the end of this program!!! 77 | sim_qFile = baseFolder+r'\simuS.txt' 78 | obs_qFile = baseFolder+r'\obsS.txt' 79 | tOffset = 0 80 | qSimu, pSimu = PlotResult(tStart, tEnd, sim_qFile, 'r', tOffset) 81 | qObs, pObs = PlotResult(tStart, tEnd, obs_qFile, 'g', tOffset) 82 | fsize = 16 83 | plt.xlabel(u"Time",fontsize=fsize) 84 | plt.ylabel(u'Discharge(m3/s)',fontsize=fsize) 85 | 86 | plt.legend([pObs, pSimu], ["Observation", "Simulation"], loc=7) 87 | ns = NashCoef(qObs, qSimu) 88 | plt.title("Nash: %.3f" % (ns,)) 89 | ax.set_ylim(min(min(qSimu),min(qObs))-10,1.4*max(max(qSimu),max(qObs))) 90 | 91 | ax2 = ax.twinx() 92 | ax2.set_ylabel(r"Precipitation (mm)", fontsize=fsize) 93 | precFile = baseFolder+r'\prec.txt' 94 | precList, precP = PlotPrec(ax2, precFile, tStart, tEnd, 'b') 95 | ax2.set_ylim(4*max(precList),0) 96 | 97 | hours = HourLocator(byhour=range(24),interval=2) 98 | hoursFmt = DateFormatter('%b,%d %Hh') 99 | ax.xaxis.set_major_locator(hours) 100 | ax.xaxis.set_major_formatter(hoursFmt) 101 | ax.autoscale_view() 102 | # ax2.xaxis.set_major_locator(hours) 103 | # ax2.xaxis.set_major_formatter(hoursFmt) 104 | # ax2.autoscale_view() 105 | 106 | plt.grid(True) 107 | plt.show() 108 | 109 | print "Succeed!" 110 | -------------------------------------------------------------------------------- /Hydrograph/ObsS.txt: -------------------------------------------------------------------------------- 1 | 1988-8-7 20:00:0 0.0004 2 | 1988-8-7 20:20:0 0.06136 3 | 1988-8-7 20:40:0 31.6707 4 | 1988-8-7 20:50:0 132.06 5 | 1988-8-7 21:00:0 203.742 6 | 1988-8-7 21:20:0 254.04 7 | 1988-8-7 21:40:0 121.672 8 | 1988-8-7 21:50:0 37.572 9 | 1988-8-7 22:00:0 7.9666 10 | 1988-8-7 22:30:0 6.8472 11 | 1988-8-7 23:00:0 1.68084 12 | 1988-8-8 01:40:0 0.41377 13 | 1988-8-8 08:00:0 2.5137 14 | 1988-8-8 08:35:0 13.2912 15 | 1988-8-8 09:00:0 21.1523 16 | 1988-8-8 09:15:0 29.7756 17 | 1988-8-8 09:40:0 14.7405 18 | 1988-8-8 12:10:0 0.67691 19 | -------------------------------------------------------------------------------- /Hydrograph/prec.txt: -------------------------------------------------------------------------------- 1 | 1988-8-7 15:10:00 15:20:00 0.3 2 | 1988-8-7 20:10:00 20:18:00 7.7 3 | 1988-8-7 20:18:00 20:20:00 3 4 | 1988-8-7 20:20:00 20:35:00 2.6 5 | 1988-8-7 20:35:00 20:38:00 4.4 6 | 1988-8-7 20:38:00 20:47:00 10 7 | 1988-8-7 20:47:00 21:00:00 6.9 8 | 1988-8-7 21:00:00 21:10:00 1.1 9 | 1988-8-7 21:10:00 21:14:00 2 10 | 1988-8-7 21:14:00 21:20:00 2.8 11 | 1988-8-7 21:20:00 22:00:00 6 12 | 1988-8-7 22:00:00 23:00:00 7.6 13 | 1988-8-7 23:00:00 23:20:00 0.8 14 | 1988-8-8 1:35:00 2:00:00 0.4 15 | 1988-8-8 2:00:00 2:10:00 0.2 16 | 1988-8-8 2:10:00 2:23:00 2.2 17 | 1988-8-8 2:23:00 2:50:00 3.1 18 | 1988-8-8 3:15:00 4:00:00 2.4 19 | 1988-8-8 4:00:00 5:00:00 5.1 20 | 1988-8-8 5:00:00 5:35:00 0.9 21 | 1988-8-8 7:35:00 8:00:00 0.7 22 | 1988-8-8 8:00:00 8:13:00 7.8 23 | 1988-8-8 8:13:00 8:39:00 10 24 | 1988-8-8 8:39:00 8:40:00 0.7 25 | 1988-8-8 8:40:00 9:00:00 1.5 26 | 1988-8-8 9:00:00 10:00:00 1.2 27 | 1988-8-8 11:55:00 12:10:00 0.2 28 | -------------------------------------------------------------------------------- /Hydrograph/simuS.txt: -------------------------------------------------------------------------------- 1 | 1988-8-7 20:00:0 0 2 | 1988-8-7 20:20:0 0 3 | 1988-8-7 20:40:0 0.00087 4 | 1988-8-7 20:50:0 59.17112 5 | 1988-8-7 21:00:0 196.16806 6 | 1988-8-7 21:20:0 212.47784 7 | 1988-8-7 21:40:0 83.86438 8 | 1988-8-7 21:50:0 
55.98974 9 | 1988-8-7 22:00:0 36.89104 10 | 1988-8-7 22:30:0 5.05639 11 | 1988-8-7 23:00:0 0.82169 12 | 1988-8-8 01:40:0 0.00002 13 | 1988-8-8 08:00:0 0 14 | 1988-8-8 08:35:0 0.08759 15 | 1988-8-8 09:00:0 2.43216 16 | 1988-8-8 09:15:0 11.56709 17 | 1988-8-8 09:40:0 45.27423 18 | 1988-8-8 12:10:0 0.03099 19 | -------------------------------------------------------------------------------- /NSGA2/.idea/NSGA2.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /NSGA2/.idea/encodings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /NSGA2/.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /NSGA2/.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /NSGA2/.idea/workspace.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 10 | 11 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 26 | 27 | 28 | 1476164274860 29 | 33 | 34 | 35 | 36 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /NSGA2/deap/dbf_test.py: -------------------------------------------------------------------------------- 1 | import csv 2 | 3 | from dbfread import DBF 4 | 5 | if __name__ == '__main__': 6 | dbff = r'C:\Users\ZhuLJ\Desktop\test\aug2.DBF' 7 | csvf = r'C:\Users\ZhuLJ\Desktop\test\aug2.csv' 8 | table = DBF(dbff) 9 | f = open(csvf, 'w') 10 | writerobj = csv.writer(f, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL) 11 | header = [v.encode('utf-8') for v in table.field_names] 12 | writerobj.writerow(header) 13 | for record in table: 14 | currec = list() 15 | for v in record.values(): 16 | if isinstance(v, unicode): 17 | v = v.encode('utf-8') 18 | elif v is None: 19 | v = '' 20 | else: 21 | v = str(v) 22 | if len(v) > 8 and (v[0:2] == '14' or v[0:2] == '13' or v[0:2] == '622'): 23 | v = '\'%s' % v 24 | currec.append(v) 25 | writerobj.writerow(currec) 26 | f.close() 27 | -------------------------------------------------------------------------------- /NSGA2/deap/demo1.py: -------------------------------------------------------------------------------- 1 | # Overview of DEAP document. 2 | import pickle 3 | import random 4 | 5 | import numpy 6 | from deap import base, creator 7 | from deap import tools 8 | 9 | # 1. Types 10 | 11 | # Create a class named 'FitnessMin', inherited from base.Fitness, has the weights attribute 12 | creator.create('FitnessMin', base.Fitness, weights=(1.0,)) 13 | creator.create('Individual', list, fitness=creator.FitnessMin) 14 | 15 | # another example of creator a class, and initialize an object. 16 | creator.create("Foo", list, bar=dict, spam=1) 17 | x = creator.Foo() 18 | print x.bar, x.spam # {} 1 19 | 20 | # 2. 
Initialization 21 | IND_SIZE = 10 22 | toolbox = base.Toolbox() 23 | 24 | 25 | # example of how to use Toolbox 26 | def func(a, b, c=3): 27 | print a, b, c 28 | 29 | 30 | toolbox.register('myFunc', func, 2, c=4) 31 | toolbox.register('myFunc2', func) 32 | toolbox.myFunc(3) # 2 3 4, the register and call statements is equal to func(2, 3, 4) 33 | toolbox.myFunc2(2, 3, 4) 34 | 35 | 36 | class initParam(object): 37 | """Test.""" 38 | 39 | def __init__(self, v): 40 | print ('initial, v: %f' % v) 41 | self.multiply = v 42 | self.fid = random.random() 43 | @staticmethod 44 | def get_random(v): 45 | cc = initParam(v) 46 | l = list() 47 | for i in range(10): 48 | l.append(random.random() * cc.multiply) 49 | print ('get_random, v: %s' % ','.join(str(i) for i in l)) 50 | return l 51 | 52 | 53 | def initRepeatWithCfg(container, generator, cf, n=2): 54 | return container(generator(cf) for _ in xrange(n)) 55 | 56 | 57 | def initIterateWithCfg(container, generator, cf): 58 | return container(generator(cf)) 59 | 60 | 61 | toolbox.register('attribute', initParam.get_random) 62 | # toolbox.register('individual', initRepeatWithCfg, creator.Individual, 63 | # toolbox.attribute, n=IND_SIZE) 64 | toolbox.register('individual', initIterateWithCfg, creator.Individual, toolbox.attribute) 65 | toolbox.register('population', initRepeatWithCfg, list, toolbox.individual) 66 | 67 | 68 | # 3. Operators 69 | def evaluate(individual, n): 70 | return sum(individual) / n 71 | 72 | 73 | toolbox.register('mate', tools.cxTwoPoint) 74 | toolbox.register('mutate', tools.mutGaussian, mu=0, sigma=1, indpb=0.1) 75 | toolbox.register('select', tools.selTournament, tournsize=3) 76 | toolbox.register('evaluate', evaluate) 77 | 78 | stats = tools.Statistics(key=lambda ind: ind.fitness.values) 79 | stats.register('avg', numpy.mean, axis=0) 80 | stats.register('std', numpy.std, axis=0) 81 | stats.register('min', numpy.min, axis=0) 82 | stats.register('max', numpy.max, axis=0) 83 | 84 | logbook = tools.Logbook() 85 | 86 | 87 | def main(): 88 | cc = initParam(0.8) 89 | pop = toolbox.population(0.8, n=50) 90 | 91 | CXPB, MUTPB, NGEN = 0.5, 0.2, 40 92 | 93 | # evaluate the entire population 94 | fitnesses = map(toolbox.evaluate, pop, [9]*50) 95 | print len(fitnesses) # 50 96 | for ind, fit in zip(pop, fitnesses): 97 | ind.fitness.values = (fit,) 98 | 99 | for g in range(NGEN): 100 | # select the next generation individuals 101 | offspring = toolbox.select(pop, len(pop)) 102 | # clone the selected individuals 103 | offspring = map(toolbox.clone, offspring) 104 | # Apply crossover and mutation on the offspring 105 | for child1, child2 in zip(offspring[::2], offspring[1::2]): 106 | if random.random() < CXPB: 107 | toolbox.mate(child1, child2) 108 | del child1.fitness.values 109 | del child2.fitness.values 110 | for mutant in offspring: 111 | if random.random() < MUTPB: 112 | toolbox.mutate(mutant) 113 | del mutant.fitness.values 114 | 115 | # Evaluate the individuals with an invalid fitness 116 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid] 117 | fitnesses = map(toolbox.evaluate, invalid_ind, [9]*len(invalid_ind)) 118 | for ind, fit in zip(invalid_ind, fitnesses): 119 | ind.fitness.values = (fit,) 120 | 121 | # The population is entirely replaced by the offspring 122 | pop[:] = offspring 123 | record = stats.compile(pop) 124 | # print record 125 | logbook.record(gen=g, **record) 126 | return pop, logbook 127 | 128 | 129 | if __name__ == '__main__': 130 | main() 131 | logbook.header = 'gen', 'avg' 132 | print logbook 133 | 
gen = logbook.select('gen') 134 | fit_maxs = logbook.select('max') 135 | # import matplotlib.pyplot as plt 136 | # fig, ax1 = plt.subplots() 137 | # line1 = ax1.plot(gen, fit_maxs, 'b', label='Maximum fitness') 138 | # ax1.set_xlabel('Generation') 139 | # ax1.set_ylabel('Fitness', color='b') 140 | # for t1 in ax1.get_yticklabels(): 141 | # t1.set_color('b') 142 | # labs = [l.get_label() for l in line1] 143 | # ax1.legend(line1, labs, loc='center right') 144 | # plt.show() 145 | 146 | # output logbook 147 | f = open(r'D:\tmp\logbook.txt', 'w') 148 | f.write(logbook.__str__()) 149 | f.close() 150 | -------------------------------------------------------------------------------- /NSGA2/inspyred/nsga_example_inspyred.py: -------------------------------------------------------------------------------- 1 | from random import Random 2 | from time import time 3 | import inspyred 4 | import matplotlib as plt 5 | 6 | def main(prng=None, display=False): 7 | if prng is None: 8 | prng = Random() 9 | prng.seed(time()) 10 | 11 | problem = inspyred.benchmarks.Kursawe(3) 12 | ea = inspyred.ec.emo.NSGA2(prng) 13 | ea.variator = [inspyred.ec.variators.blend_crossover, 14 | inspyred.ec.variators.gaussian_mutation] 15 | ea.terminator = inspyred.ec.terminators.generation_termination 16 | final_pop = ea.evolve(generator=problem.generator, 17 | evaluator=problem.evaluator, 18 | pop_size=100, 19 | maximize=problem.maximize, 20 | bounder=problem.bounder, 21 | max_generations=80) 22 | 23 | if display: 24 | final_arc = ea.archive 25 | print('Best Solutions: \n') 26 | for f in final_arc: 27 | print(f) 28 | import matplotlib.pyplot as plt 29 | x = [] 30 | y = [] 31 | for f in final_arc: 32 | x.append(f.fitness[0]) 33 | y.append(f.fitness[1]) 34 | plt.scatter(x, y, color='b') 35 | # plt.savefig('{0} Example ({1}).pdf'.format(ea.__class__.__name__, 36 | # problem.__class__.__name__), 37 | # format='pdf') 38 | plt.show() 39 | return ea 40 | 41 | if __name__ == '__main__': 42 | main(display=True) 43 | -------------------------------------------------------------------------------- /NSGA2/inspyred/parallel_evaluation_pp_example.py: -------------------------------------------------------------------------------- 1 | from random import Random 2 | from time import time 3 | import inspyred 4 | import math 5 | 6 | # Define an additional "necessary" function for the evaluator 7 | # to see how it must be handled when using pp. 
8 | def my_squaring_function(x): 9 | return x**2 10 | 11 | def generate_rastrigin(random, args): 12 | size = args.get('num_inputs', 10) 13 | return [random.uniform(-5.12, 5.12) for i in range(size)] 14 | 15 | def evaluate_rastrigin(candidates, args): 16 | fitness = [] 17 | for cs in candidates: 18 | fit = 10 * len(cs) + sum([(my_squaring_function(x - 1) - 19 | 10 * math.cos(2 * math.pi * (x - 1))) 20 | for x in cs]) 21 | fitness.append(fit) 22 | return fitness 23 | 24 | def main(prng=None, display=False): 25 | if prng is None: 26 | prng = Random() 27 | prng.seed(time()) 28 | 29 | ea = inspyred.ec.DEA(prng) 30 | if display: 31 | ea.observer = inspyred.ec.observers.stats_observer 32 | ea.terminator = inspyred.ec.terminators.evaluation_termination 33 | final_pop = ea.evolve(generator=generate_rastrigin, 34 | evaluator=inspyred.ec.evaluators.parallel_evaluation_pp, 35 | pp_evaluator=evaluate_rastrigin, 36 | pp_dependencies=(my_squaring_function,), 37 | pp_modules=("math",), 38 | pop_size=8, 39 | bounder=inspyred.ec.Bounder(-5.12, 5.12), 40 | maximize=False, 41 | max_evaluations=256, 42 | num_inputs=3) 43 | 44 | if display: 45 | best = max(final_pop) 46 | print('Best Solution: \n{0}'.format(str(best))) 47 | return ea 48 | 49 | if __name__ == '__main__': 50 | main(display=True) 51 | -------------------------------------------------------------------------------- /NSGA2/nsga_example.py: -------------------------------------------------------------------------------- 1 | from random import Random 2 | from time import time 3 | import inspyred 4 | 5 | def main(prng=None, display=False): 6 | if prng is None: 7 | prng = Random() 8 | prng.seed(time()) 9 | 10 | problem = inspyred.benchmarks.Kursawe(3) 11 | ea = inspyred.ec.emo.NSGA2(prng) 12 | ea.variator = [inspyred.ec.variators.blend_crossover, 13 | inspyred.ec.variators.gaussian_mutation] 14 | ea.terminator = inspyred.ec.terminators.generation_termination 15 | final_pop = ea.evolve(generator=problem.generator, 16 | evaluator=problem.evaluator, 17 | pop_size=100, 18 | maximize=problem.maximize, 19 | bounder=problem.bounder, 20 | max_generations=80) 21 | 22 | if display: 23 | final_arc = ea.archive 24 | print('Best Solutions: \n') 25 | for f in final_arc: 26 | print(f) 27 | import matplotlib.pyplot as plt 28 | x = [] 29 | y = [] 30 | for f in final_arc: 31 | x.append(f.fitness[0]) 32 | y.append(f.fitness[1]) 33 | plt.scatter(x, y, color='b') 34 | # plt.savefig('{0} Example ({1}).pdf'.format(ea.__class__.__name__, 35 | # problem.__class__.__name__), 36 | # format='pdf') 37 | plt.show() 38 | return ea 39 | 40 | if __name__ == '__main__': 41 | main(display=True) 42 | -------------------------------------------------------------------------------- /NSGA2/parallel_evaluation_pp_example.py: -------------------------------------------------------------------------------- 1 | from random import Random 2 | from time import time 3 | import inspyred 4 | import math 5 | 6 | # Define an additional "necessary" function for the evaluator 7 | # to see how it must be handled when using pp. 
8 | def my_squaring_function(x): 9 | return x**2 10 | 11 | def generate_rastrigin(random, args): 12 | size = args.get('num_inputs', 10) 13 | return [random.uniform(-5.12, 5.12) for i in range(size)] 14 | 15 | def evaluate_rastrigin(candidates, args): 16 | fitness = [] 17 | for cs in candidates: 18 | fit = 10 * len(cs) + sum([(my_squaring_function(x - 1) - 19 | 10 * math.cos(2 * math.pi * (x - 1))) 20 | for x in cs]) 21 | fitness.append(fit) 22 | return fitness 23 | 24 | def main(prng=None, display=False): 25 | if prng is None: 26 | prng = Random() 27 | prng.seed(time()) 28 | 29 | ea = inspyred.ec.DEA(prng) 30 | if display: 31 | ea.observer = inspyred.ec.observers.stats_observer 32 | ea.terminator = inspyred.ec.terminators.evaluation_termination 33 | final_pop = ea.evolve(generator=generate_rastrigin, 34 | evaluator=inspyred.ec.evaluators.parallel_evaluation_pp, 35 | pp_evaluator=evaluate_rastrigin, 36 | pp_dependencies=(my_squaring_function,), 37 | pp_modules=("math",), 38 | pop_size=8, 39 | bounder=inspyred.ec.Bounder(-5.12, 5.12), 40 | maximize=False, 41 | max_evaluations=256, 42 | num_inputs=3) 43 | 44 | if display: 45 | best = max(final_pop) 46 | print('Best Solution: \n{0}'.format(str(best))) 47 | return ea 48 | 49 | if __name__ == '__main__': 50 | main(display=True) 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Packages: GeoPy 2 | Author: Liangjun Zhu 3 | Email: zlj@lreis.ac.cn 4 | 5 | If this repository is useful to you, it is truly my pleasure, and I hope some of this code can be of help to you. 6 | In fact, this repository is a collection of my studies on Python, mostly based on Arcpy (ArcGIS 9.3.x ~ 10.x). Every single .py file or folder can work independently. 7 | Next, I will try my best to provide clear instructions for each function. 8 | 9 | 1. MultiValue2Zones 10 | This script computes statistics of the given rasters within the 11 | zones of another polygon shapefile and reports the results to a 12 | CSV file. 13 | The supported statistics include "MEAN","MAJORITY", 14 | "MAXIMUM","MEDIAN","MINIMUM","MINORITY","RANGE","STD","SUM", 15 | "VARIETY". Each raster's value will be appended to the original 16 | shapefile's attribute table and named by the corresponding 17 | raster's name. 18 | 19 | 2. AddNearAtrributesDirections 20 | This script identifies the polygons surrounding each polygon and adds attributes to store their relative directions. 21 | For detailed instructions and usage, please go to http://bbs.esrichina-bj.cn/ESRI/viewthread.php?tid=126293&extra=&page=1 , 22 | http://bbs.esrichina-bj.cn/ESRI/viewthread.php?tid=60835&extra=&page=1 and http://bbs.esrichina-bj.cn/ESRI/viewthread.php?tid=85765. 23 | 24 | 3. CSV2PtsShp 25 | This is a very simple but useful script that converts .CSV point coordinates to an ESRI shapefile (a minimal GDAL/OGR sketch of the idea follows this list). 26 | 4. RUSLE_LS 27 | Calculates the LS factor from DEM data according to RUSLE-based criteria. 28 | -- RUSLE_LS_4_PC.AML is an AML script based on ArcInfo Workstation; the code is originally from Rick D. Van Remortel et al. 29 | -- RUSLE_LS(Tool).py is a Python version that accomplishes the same function based on ArcGIS 9.3. 30 | My advice: if your data is huge, please use the AML code, since the Python version is less efficient. 31 | 32 | 33 | 5. Hydrograph 34 | This script is based on matplotlib. Download the script and the test data, and you can generate the hydrograph. 35 | 6.
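An illustrative sketch for item 3 (CSV2PtsShp): the CSV2PtsShp.py source is not reproduced here, so the snippet below only sketches the general idea of reading point coordinates from a CSV file and writing them to an ESRI shapefile with GDAL/OGR; the column names ('ID', 'X', 'Y') and output name are assumptions, not the script's actual interface.

    import csv
    from osgeo import ogr

    driver = ogr.GetDriverByName('ESRI Shapefile')
    ds = driver.CreateDataSource('samples.shp')                 # hypothetical output
    layer = ds.CreateLayer('samples', geom_type=ogr.wkbPoint)
    layer.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))

    with open('designed_samples.csv') as f:
        for row in csv.DictReader(f):                           # assumed columns: ID, X, Y
            feat = ogr.Feature(layer.GetLayerDefn())
            feat.SetField('ID', int(row['ID']))
            pt = ogr.Geometry(ogr.wkbPoint)
            pt.AddPoint(float(row['X']), float(row['Y']))
            feat.SetGeometry(pt)
            layer.CreateFeature(feat)
    ds = None  # close the datasource and flush features to disk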
36 | -------------------------------------------------------------------------------- /RUSLE_LS/RUSLE.tbx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/RUSLE_LS/RUSLE.tbx -------------------------------------------------------------------------------- /RUSLE_LS/RUSLE_LS(Tool).py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/RUSLE_LS/RUSLE_LS(Tool).py -------------------------------------------------------------------------------- /RillPy/Hillslope.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from Util import * 3 | import numpy 4 | 5 | ## Functions for Hillslope Delineating ## 6 | def isFirstStreamCell(StreamRaster, nodata, row, col, flow_dir): 7 | nrows,ncols = StreamRaster.shape 8 | if(StreamRaster[row][col] == nodata): 9 | return False 10 | else: 11 | for di in [-1,0,1]: 12 | for dj in [-1,0,1]: 13 | ni = row + di 14 | nj = col + dj 15 | if ni < 0 or nj < 0 or ni >= nrows or nj >= ncols or flow_dir[ni][nj] <=0: 16 | continue 17 | if downstream_index(flow_dir[ni][nj], ni, nj) == (row,col) and (StreamRaster[ni][nj] != nodata): 18 | return False 19 | return True 20 | def isStreamSegmentCell(StreamRaster, nodata, row, col, flow_dir): 21 | ## 1 means First cell, 2 means Finally cell, 3 means middle cells. 22 | nrows,ncols = StreamRaster.shape 23 | count = 0 24 | if StreamRaster[row][col] == nodata: 25 | return 0 26 | else: 27 | for di in [-1,0,1]: 28 | for dj in [-1,0,1]: 29 | ni = row + di 30 | nj = col + dj 31 | if ni < 0 or nj < 0 or ni >= nrows or nj >= ncols or flow_dir[ni][nj] <=0: 32 | continue 33 | if downstream_index(flow_dir[ni][nj], ni, nj) == (row,col) and (StreamRaster[ni][nj] == StreamRaster[row][col]): 34 | count = count + 1 35 | if count >= 1: 36 | idx = downstream_index(flow_dir[row][col], row, col) 37 | if idx[0] >= nrows or idx[1] >= ncols or idx[0] <0 or idx[1] < 0 or StreamRaster[idx[0]][idx[1]] == nodata: 38 | return 2 39 | else: 40 | return 3 41 | else: 42 | return 1 43 | def GetRillStartIdx(StreamLinks,nodata,FlowDir): 44 | # Get first cell index of each rill 45 | nrows,ncols = StreamLinks.shape 46 | countRill = 0 47 | countmid = 0 48 | countend = 0 49 | RillStartIdx = [] 50 | for i in range(nrows): 51 | for j in range(ncols): 52 | if (isStreamSegmentCell(StreamLinks,nodata,i,j,FlowDir) == 1): 53 | countRill = countRill + 1 54 | RillStartIdx.append((i,j)) 55 | elif (isStreamSegmentCell(StreamLinks,nodata,i,j,FlowDir) == 3): 56 | countend = countend + 1 57 | elif (isStreamSegmentCell(StreamLinks,nodata,i,j,FlowDir) == 2): 58 | countmid = countmid + 1 59 | 60 | #print "Rill number is : %s,%s,%s" % (countRill,countmid,countend) 61 | return RillStartIdx 62 | 63 | def fillUpstreamCells(flow_dir,stream,nodata,hillslp,value,row,col): 64 | nrows,ncols = flow_dir.shape 65 | for di in [-1,0,1]: 66 | for dj in [-1,0,1]: 67 | tempRow = di + row 68 | tempCol = dj + col 69 | if tempRow < 0 or tempCol < 0 or tempRow >= nrows or tempCol >= ncols: 70 | continue 71 | if downstream_index(flow_dir[tempRow][tempCol],tempRow,tempCol)==(row,col) and stream[tempRow][tempCol] == nodata: 72 | if hillslp[tempRow][tempCol] != 1: 73 | hillslp[tempRow][tempCol] = value 74 | #print tempRow,tempCol 75 | fillUpstreamCells(flow_dir,stream,nodata,hillslp,value,tempRow,tempCol) 76 | 77 | def 
DelineateHillslopes(StreamFile,FlowDirFile,HillslpFile): 78 | print "Delineating hillslopes (header, left, and right hillslope)..." 79 | StreamLinks = ReadRaster(StreamFile).data 80 | nodata = ReadRaster(StreamFile).noDataValue 81 | geotrans = ReadRaster(StreamFile).geotrans 82 | FlowDir = ReadRaster(FlowDirFile).data 83 | nrows,ncols = StreamLinks.shape 84 | count = 0 85 | SourcePtsIdx = [] 86 | for i in range(nrows): 87 | for j in range(ncols): 88 | if(isFirstStreamCell(StreamLinks,nodata,i,j,FlowDir)): 89 | count = count +1 90 | SourcePtsIdx.append((i,j)) 91 | 92 | #print "Headwater point:%s" % count 93 | #test = GetRillStartIdx(StreamLinks,nodata,FlowDir) 94 | HillslopeMtx = numpy.ones((nrows,ncols)) 95 | if nodata != -9999: 96 | HillslopeMtx = HillslopeMtx * -9999 97 | else: 98 | HillslopeMtx = HillslopeMtx * nodata 99 | for SourcePt in SourcePtsIdx: 100 | #print SourcePt 101 | cRow,cCol = SourcePt 102 | for di in [-1,0,1]: 103 | for dj in [-1,0,1]: 104 | ci = cRow + di 105 | cj = cCol + dj 106 | if ci < 0 or cj < 0 or ci >= nrows or cj >= ncols: 107 | continue 108 | if downstream_index(FlowDir[ci][cj],ci,cj)==(cRow,cCol): 109 | HillslopeMtx[ci][cj] = 0 110 | fillUpstreamCells(FlowDir,StreamLinks,nodata,HillslopeMtx,0,ci,cj) 111 | previous = SourcePt 112 | current = downstream_index(FlowDir[cRow][cCol],cRow,cCol) 113 | 114 | while not(current[0] < 0 or current[1] < 0 or current[0] >= nrows or current[1] >= ncols): 115 | CurRow = current[0] 116 | CurCol = current[1] 117 | StreamLinkValue = StreamLinks[CurRow][CurCol] 118 | DirIdx = DIR_VALUES.index(FlowDir[CurRow][CurCol]) 119 | if DirIdx <= 7: 120 | Clockwise = range(DirIdx + 1, 8) 121 | for i in range(DirIdx): 122 | Clockwise.append(i) 123 | CounterClock = list(reversed(Clockwise)) 124 | if isStreamSegmentCell(StreamLinks,nodata,CurRow,CurCol,FlowDir) == 1: 125 | Clockwise = Clockwise[0:4] 126 | CounterClock = CounterClock[0:4] 127 | if isStreamSegmentCell(StreamLinks,nodata,CurRow,CurCol,FlowDir) == 2: 128 | DirIdx = DIR_VALUES.index(FlowDir[previous[0]][previous[1]]) 129 | Clockwise = range(DirIdx + 1, 8) 130 | for i in range(DirIdx): 131 | Clockwise.append(i) 132 | CounterClock = list(reversed(Clockwise)) 133 | Clockwise = Clockwise[0:4] 134 | CounterClock = CounterClock[0:4] 135 | for Dir in Clockwise: 136 | temprow = CurRow + DIR_ITEMS[DIR_VALUES[Dir]][0] 137 | tempcol = CurCol + DIR_ITEMS[DIR_VALUES[Dir]][1] 138 | if temprow < 0 or tempcol < 0 or temprow >= nrows or tempcol >= ncols: 139 | continue 140 | if downstream_index(FlowDir[temprow][tempcol],temprow,tempcol) == (CurRow,CurCol): 141 | if StreamLinks[temprow][tempcol] == StreamLinkValue: 142 | break 143 | elif StreamLinks[temprow][tempcol] != nodata: 144 | continue 145 | else: 146 | HillslopeMtx[temprow][tempcol] = 1 147 | fillUpstreamCells(FlowDir,StreamLinks,nodata,HillslopeMtx,1,temprow,tempcol) 148 | for Dir in CounterClock: 149 | temprow = CurRow + DIR_ITEMS[DIR_VALUES[Dir]][0] 150 | tempcol = CurCol + DIR_ITEMS[DIR_VALUES[Dir]][1] 151 | if temprow < 0 or tempcol < 0 or temprow >= nrows or tempcol >= ncols: 152 | continue 153 | if downstream_index(FlowDir[temprow][tempcol],temprow,tempcol) == (CurRow,CurCol): 154 | if StreamLinks[temprow][tempcol] == StreamLinkValue: 155 | break 156 | elif StreamLinks[temprow][tempcol] != nodata: 157 | continue 158 | elif HillslopeMtx[temprow][tempcol] != 1: 159 | HillslopeMtx[temprow][tempcol] = 2 160 | fillUpstreamCells(FlowDir,StreamLinks,nodata,HillslopeMtx,2,temprow,tempcol) 161 | previous = current 162 | current = 
downstream_index(FlowDir[CurRow][CurCol],CurRow,CurCol) 163 | WriteAscFile(HillslpFile, HillslopeMtx,ncols,nrows,geotrans,-9999) 164 | 165 | 166 | 167 | if __name__=='__main__': 168 | streamf = r'C:\Users\ZhuLJ\Desktop\test\stream' 169 | flowdirf = r'C:\Users\ZhuLJ\Desktop\test\grid_fdir_1' 170 | hillslpf = r'C:\Users\ZhuLJ\Desktop\test\hillslope_test.asc' 171 | DelineateHillslopes(streamf, flowdirf, hillslpf) -------------------------------------------------------------------------------- /RillPy/Memo.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | ## Functional test! 3 | ##UpStreamShp = RillExtDir + os.sep + "UpStream.shp" 4 | ##arcpy.CreateFeatureclass_management(RillExtDir, "UpStream.shp", "POLYLINE", "", "DISABLED", "DISABLED", "") 5 | ##arcpy.Append_management(["north.shp", "south.shp", "east.shp", "west.shp"], UpStreamShp, "NO_TEST","","") 6 | import math,copy 7 | #Elev = [398.64911,395.37039,389.93884,382.65137,375.08615,368.56583,365.2388,363.12885,362.1973,361.80881] 8 | #Elev = [383.33521,381.29871,377.68607,372.73752,366.91272,361.18701,356.59479,353.28427,349.79819,347.04926,344.62747,343.27286,341.56818,339.35349,335.77808,330.39804,323.68604,317.19299,312.7785,310.86328,310.17453,308.9617,308.42947,308.30804] 9 | # 10 | #Length = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24] 11 | #k = [] 12 | #k2 = [] 13 | #for i in range(1,len(Elev)): 14 | # #print Elev[i] 15 | # tempk = math.atan((Elev[i]-Elev[i-1])/(Length[i]-Length[i-1]))*180./math.pi 16 | # if tempk < 0: 17 | # tempk = 180 + tempk 18 | # k.append(tempk) 19 | #print k 20 | #for i in range(1,len(k)): 21 | # tempk2 = math.atan((k[i]-k[i-1])/(Length[i+1]-Length[i]))*180./math.pi 22 | # if tempk2 < 0: 23 | # tempk2 = 180 + tempk2 24 | # k2.append(tempk2) 25 | #print k2 26 | #print len(k2),k2.index(max(k2)) 27 | #curRouteSOS = [57.239685,68.406319,60.301327,44.899513,57.68351,61.255352,28.919455,31.739635,47.483128,70.880402,74.572784,70.896515,46.440041] 28 | #curRouteSlp = [6.942997,17.525024,25.871187,29.097631,33.821583,45.065792,52.355793,51.575272,47.721035,38.855087,26.716295,13.982291,6.188683] 29 | #curRouteElev = [405.30917,404.47617,402.88577,400.73474,397.63855,393.18539,386.34515,378.82489,372.15216,366.53317,364.11539,362.88748,362.13895] 30 | #lowerMaxSOS = max(curRouteSOS) * 0.9 #- 0.05 * (max(curRouteSOS) - min(curRouteSOS)) 31 | #MaxSlpIdx = curRouteSlp.index(max(curRouteSlp)) 32 | #MaxSOSIdx = curRouteSOS.index(max(curRouteSOS)) 33 | #temp = copy.copy(curRouteSOS) 34 | #temp.sort() 35 | #SecSOSIdx = curRouteSOS.index(temp[len(temp)-2]) 36 | #EdgeIdx = 0 37 | #if MaxSlpIdx >= min(MaxSOSIdx,SecSOSIdx) and MaxSlpIdx <= max(MaxSOSIdx,SecSOSIdx): 38 | # for i in range(min(MaxSOSIdx,SecSOSIdx)+1): #,max(MaxSOSIdx,SecSOSIdx)): 39 | # if curRouteSlp[i] >= 20: 40 | # EdgeIdx = i 41 | # break 42 | #for i in range(9): 43 | # if curRouteSOS[i] >= lowerMaxSOS and curRouteSlp[i] >= 20: 44 | # if EdgeIdx != 0: 45 | # EdgeIdx = min(EdgeIdx, i) 46 | # else: 47 | # EdgeIdx = i 48 | # print EdgeIdx 49 | # break 50 | 51 | #MaxSOSIdx = curRouteSOS.index(max(curRouteSOS)) 52 | #tempSOS = copy.copy(curRouteSOS) 53 | #tempSOS.sort() 54 | #SecSOSIdx = curRouteSOS.index(tempSOS[len(tempSOS)-2]) 55 | #if len(curRouteElev) > 3: 56 | # if MaxSOSIdx in range(len(curRouteElev)-3,len(curRouteElev)): 57 | # MaxSOSIdx = curRouteSOS.index(tempSOS[len(tempSOS)-3]) 58 | # SecSOSIdx = curRouteSOS.index(tempSOS[len(tempSOS)-2]) 59 | # 60 | #lowerMaxSOS = 
curRouteSOS[MaxSOSIdx] * 0.9 #- 0.05 * (max(curRouteSOS) - min(curRouteSOS)) 61 | #MaxSlpIdx = curRouteSlp.index(max(curRouteSlp)) 62 | #EdgeIdx = 9999 63 | #if MaxSlpIdx >= min(MaxSOSIdx,SecSOSIdx) and MaxSlpIdx <= max(MaxSOSIdx,SecSOSIdx): 64 | # for i in range(min(MaxSOSIdx,SecSOSIdx)+1): #,max(MaxSOSIdx,SecSOSIdx)): 65 | # if curRouteSlp[i] >= 20: 66 | # EdgeIdx = i 67 | # break 68 | #for i in range(11): 69 | # if curRouteSOS[i] >= lowerMaxSOS and curRouteSlp[i] >= 20: 70 | # if EdgeIdx != 9999: 71 | # EdgeIdx = min(EdgeIdx, i) 72 | # break 73 | # else: 74 | # EdgeIdx = i 75 | # break 76 | #print EdgeIdx 77 | 78 | 79 | lists = [[[55, 62], [56, 62], [57, 62], [58, 62], [59, 63], [60, 64]], [[58, 63], [59, 64], [60, 64]], [[57, 63], [58, 64], [59, 64], [60, 64]], [[56, 63], [57, 64], [58, 64], [59, 64], [60, 64]], [[51, 59], [52, 60], [53, 61], [54, 62], [55, 63], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]], [[43, 60], [44, 60], [45, 60], [46, 60], [47, 60], [48, 60], [49, 60], [50, 60], [51, 60], [52, 61], [53, 62], [54, 63], [55, 64], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]], [[45, 61], [46, 61], [47, 61], [48, 61], [49, 60], [50, 60], [51, 60], [52, 61], [53, 62], [54, 63], [55, 64], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]], [[37, 60], [38, 60], [39, 61], [40, 61], [41, 61], [42, 61], [43, 61], [44, 61], [45, 62], [46, 62], [47, 62], [48, 61], [49, 60], [50, 60], [51, 60], [52, 61], [53, 62], [54, 63], [55, 64], [56, 64], [57, 64], [58, 64], [59, 64], [60, 64]]] 80 | #print lists 81 | f = open(r'e:\test.txt','w') 82 | for list in lists: 83 | ##print list 84 | f.write(str(list)) 85 | f.write('\n') 86 | f.close() 87 | count = 0 88 | for line in open(r'e:\test.txt'): 89 | count = count + 1 90 | s = eval(line) 91 | print len(s) 92 | print count -------------------------------------------------------------------------------- /RillPy/README.txt: -------------------------------------------------------------------------------- 1 | RillPy is a tool for rill extraction and rill morphological characteristics calculation based on Arcpy,gdal,Scipy etc. 2 | 3 | The whole tool contains of several modules: 4 | --Util.py Some fundamental functions. 5 | --Subbasin.py Subbasin delineation functions. 6 | --Hillslope.py Hillslope delineation functions. 7 | --Rill.py 8 | --ShoulderLine.py 9 | --main.py Configure the whole tool and make the entrance. 10 | Functions in detail as follows. 
11 | --Util 12 | ---currentPath() 13 | ---makeResultFolder(rootdir) 14 | ---downstream_index(DIR_VALUE, i, j) 15 | ---ReadRaster(rasterFile) 16 | ---WriteAscFile(filename, data, xsize, ysize, geotransform, noDataValue) 17 | ---WriteGTiffFile(filename, nRows, nCols, data, geotransform, srs, noDataValue, gdalType) 18 | ---WriteGTiffFileByMask(filename, data, mask, gdalType) 19 | ---NashCoef(qObs, qSimu) 20 | ---RMSE(list1, list2) 21 | ---StdEv(list1) 22 | ---UtilHydroFiles(DEMsrc, PreprocessDir) 23 | ---RemoveLessPts(RasterFile,num,OutputRaster) 24 | 25 | --Subbasin 26 | ---GenerateStreamNetByTHR(DEMbuf,FlowDirFile,FlowAccFile,threshold,folder) 27 | ---GenerateWatershedByStream 28 | ---RillIndexCalc(DEMbuf,StreamOrder) 29 | --- 30 | --Hillslope 31 | ---isFirstStreamCell(StreamRaster, nodata, row, col, flow_dir) 32 | ---isStreamSegmentCell(StreamRaster, nodata, row, col, flow_dir) 33 | ---GetRillStartIdx(StreamLinks,nodata,FlowDir) 34 | ---fillUpstreamCells(flow_dir,stream,nodata,hillslp,value,row,col) 35 | ---DelineateHillslopes(StreamFile,FlowDirFile,HillslpFile) 36 | --- 37 | --Rill 38 | ---IdentifyRillRidges(HillslpFile,StreamFile,FlowDirFile,FlowAccFile,WatershedFile,DEMfil,folder) 39 | --- 40 | In main.py, there are several parameters: 41 | --DEMsrc The source DEM in which rill erosion occurs. 42 | --rootdir The result folder. If the folder does not exist, it will be created; if rootdir 43 | is "", the result folder will be placed in the current folder and named "RillPyResults". 44 | In the rootdir, four folders will be created: 45 | 0Temp, 1Preprocess, 2Rill, 3Stats 46 | --streamTHR Threshold for the initial stream links and subbasins extraction. 47 | If streamTHR = 0, the program will set the threshold to 1% of the maximum flow 48 | accumulation by default; if 0 < streamTHR < 1, the threshold will be streamTHR * the maximum accumulation; 49 | else if streamTHR > 1, it is used directly as the threshold.
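A minimal usage sketch based on the functions listed above (the authoritative call sequence is in main.py; all paths below are hypothetical, and the preprocessed grids are assumed to have been produced beforehand, e.g. by UtilHydroFiles):

    # Hedged example -- not the code of main.py; input/output paths are made up.
    from Subbasin import GenerateStreamNetByTHR
    from Hillslope import DelineateHillslopes

    dem_buf = r'D:\RillPyResults\1Preprocess\dembuf'    # buffered/filled DEM (assumed name)
    flow_dir = r'D:\RillPyResults\1Preprocess\flowdir'  # D8 flow direction (assumed name)
    flow_acc = r'D:\RillPyResults\1Preprocess\flowacc'  # flow accumulation (assumed name)
    rill_dir = r'D:\RillPyResults\2Rill'

    # threshold = 0 -> default of 1% of the maximum flow accumulation
    stream, stream_order, watershed = GenerateStreamNetByTHR(
        dem_buf, flow_dir, flow_acc, 0, rill_dir)
    # split each subbasin into header, left, and right hillslopes along the rill
    DelineateHillslopes(stream, flow_dir, rill_dir + r'\hillslope.asc')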
-------------------------------------------------------------------------------- /RillPy/ShoulderLine.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from Util import * 3 | from Hillslope import * 4 | from Subbasin import * 5 | import os,sys 6 | 7 | def IdentifyRillShoulderPts(Aspect,Slope,ProfC,alpha,beta,ShoulderPts): 8 | aspect = ReadRaster(Aspect).data 9 | nrows,ncols = aspect.shape 10 | nodata = ReadRaster(Aspect).noDataValue 11 | geotrans = ReadRaster(Aspect).geotrans 12 | slope = ReadRaster(Slope).data 13 | profc = ReadRaster(ProfC).data 14 | ShoulderPtsMtx = numpy.ones((nrows,ncols)) 15 | if nodata != -9999: 16 | ShoulderPtsMtx = ShoulderPtsMtx * -9999 17 | else: 18 | ShoulderPtsMtx = ShoulderPtsMtx * nodata 19 | 20 | for i in range(nrows): 21 | for j in range(ncols): 22 | # North 23 | if (aspect[i][j] >= 0 and aspect[i][j] < 22.5) or (aspect[i][j] >= 337.5 and aspect[i][j] < 360): 24 | if not(i-1 < 0 or i+1 >= nrows): 25 | if (slope[i][j]alpha and (slope[i-1][j]-slope[i+1][j] > beta) and profc[i][j]<0: 26 | ShoulderPtsMtx[i][j] = 1 27 | continue 28 | # Northeast 29 | if (aspect[i][j] >= 22.5 and aspect[i][j] < 67.5): 30 | if not(i-1 < 0 or i+1>nrows or j-1<0 or j+1 >= ncols): 31 | if (slope[i][j]alpha and (slope[i-1][j+1]-slope[i+1][j-1] > beta) and profc[i][j]<0: 32 | ShoulderPtsMtx[i][j] = 1 33 | continue 34 | # East 35 | if (aspect[i][j] >= 67.5 and aspect[i][j] < 112.5): 36 | if not(j-1 < 0 or j+1 >= ncols): 37 | if (slope[i][j]alpha and (slope[i][j+1]-slope[i][j-1] > beta) and profc[i][j]<0: 38 | ShoulderPtsMtx[i][j] = 1 39 | continue 40 | # Southeast 41 | if (aspect[i][j] >= 112.5 and aspect[i][j] < 157.5): 42 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols): 43 | if (slope[i][j]alpha and (slope[i+1][j+1]-slope[i-1][j-1] > beta) and profc[i][j]<0: 44 | ShoulderPtsMtx[i][j] = 1 45 | continue 46 | # South 47 | if (aspect[i][j] >= 157.5 and aspect[i][j] < 202.5): 48 | if not(i-1 < 0 or i+1 >= nrows): 49 | if (slope[i][j]alpha and (slope[i+1][j]-slope[i-1][j] > beta) and profc[i][j]<0: 50 | ShoulderPtsMtx[i][j] = 1 51 | continue 52 | # Southwest 53 | if (aspect[i][j] >= 202.5 and aspect[i][j] < 247.5): 54 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols): 55 | if (slope[i][j]alpha and (slope[i+1][j-1]-slope[i-1][j+1] > beta) and profc[i][j]<0: 56 | ShoulderPtsMtx[i][j] = 1 57 | continue 58 | # West 59 | if (aspect[i][j] >= 247.5 and aspect[i][j] < 292.5): 60 | if not(j-1 < 0 or j+1 >= ncols): 61 | if (slope[i][j]alpha and (slope[i][j-1]-slope[i][j+1] > beta) and profc[i][j]<0: 62 | ShoulderPtsMtx[i][j] = 1 63 | continue 64 | # Northwest 65 | if (aspect[i][j] >= 292.5 and aspect[i][j] < 337.5): 66 | if not(i-1 < 0 or i+1 >= nrows or j-1 < 0 or j+1 >= ncols): 67 | if (slope[i][j]alpha and (slope[i-1][j-1]-slope[i+1][j+1] > beta) and profc[i][j]<0: 68 | ShoulderPtsMtx[i][j] = 1 69 | continue 70 | WriteAscFile(ShoulderPts, ShoulderPtsMtx,ncols,nrows,geotrans,-9999) 71 | 72 | def RillShoulderSegement(Boundary,FlowDir,ShoulderPts,ShoulderFile): 73 | flowdir = ReadRaster(FlowDir).data 74 | flownodata = ReadRaster(FlowDir).noDataValue 75 | geotrans = ReadRaster(FlowDir).geotrans 76 | boundary = ReadRaster(Boundary).data 77 | shoulderpts = ReadRaster(ShoulderPts).data 78 | nrows,ncols = flowdir.shape 79 | nodata = ReadRaster(Boundary).noDataValue 80 | bndIdx = [] 81 | for i in range(nrows): 82 | for j in range(ncols): 83 | if boundary[i][j] != nodata: 84 | #print i,j 85 | bndIdx.append((i,j)) 86 | iterate 
= 0 87 | changed = 1 88 | while not(changed == 0 or iterate > 150): 89 | print "iterate time:%s, changed num:%s, boundary num:%s" % (iterate,changed,len(bndIdx)) 90 | changed = 0 91 | tempbndIdx = [] 92 | for bnd in bndIdx: 93 | if shoulderpts[bnd[0]][bnd[1]] == 1: 94 | tempbndIdx.append((bnd[0],bnd[1])) 95 | else: 96 | row,col = downstream_index(flowdir[bnd[0]][bnd[1]], bnd[0],bnd[1]) 97 | if row < 0 or row >= nrows or col < 0 or col >= ncols: 98 | tempbndIdx.append((bnd[0],bnd[1])) 99 | else: 100 | tempbndIdx.append((row,col)) 101 | changed = changed + 1 102 | tempbndIdx = list(set(tempbndIdx)) 103 | bndIdx = tempbndIdx 104 | iterate = iterate + 1 105 | shoulder = numpy.ones((nrows,ncols)) 106 | shoulder = shoulder * nodata 107 | for sd in bndIdx: 108 | shoulder[sd[0]][sd[1]] = 1 109 | WriteAscFile(ShoulderFile, shoulder,ncols,nrows,geotrans,nodata) 110 | 111 | def RillShoulder(BasinFile,FlowDir,ShoulderPts,tempDir,ShoulderFile): 112 | UniqueBasinId = GetUniqueValues(BasinFile) 113 | print UniqueBasinId 114 | for BsnID in UniqueBasinId: 115 | tempBsnID = [] 116 | tempBsnID.append(BsnID) 117 | BsnASC = tempDir + os.sep + "BsnID" + str(BsnID) + ".asc" 118 | ExtractBasinBoundary(BasinFile,tempBsnID,BsnASC) 119 | ShldASC = tempDir + os.sep + "Shld" + str(BsnID) + ".asc" 120 | RillShoulderSegement(BsnASC,FlowDir,ShoulderPts,ShldASC) 121 | 122 | -------------------------------------------------------------------------------- /RillPy/Subbasin.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import os,numpy 3 | import arcpy 4 | from arcpy import env 5 | 6 | from Util import * 7 | 8 | def GenerateStreamNetByTHR(DEMbuf,FlowDirFile,FlowAccFile,threshold,folder): 9 | print "Generating initial stream network according to threshold of flow accumulation..." 10 | env.workspace = folder 11 | arcpy.gp.overwriteOutput = 1 12 | arcpy.CheckOutExtension("Spatial") 13 | threshold = float(threshold) 14 | maxAcc = float(str(arcpy.GetRasterProperties_management(FlowAccFile,"MAXIMUM"))) 15 | if threshold < MINI_VALUE: 16 | threshold = maxAcc / 100 17 | elif threshold >= MINI_VALUE and threshold <= 1: 18 | threshold = maxAcc * threshold 19 | elif threshold > 1: 20 | threshold = threshold 21 | Exec = "Con(\"%s\" > %s,1)" % (FlowAccFile, threshold) 22 | arcpy.gp.RasterCalculator_sa(Exec, "streamnet") 23 | Stream_shp = "streamnet.shp" 24 | arcpy.sa.StreamToFeature("streamnet",FlowDirFile,Stream_shp,"NO_SIMPLIFY") 25 | StreamLinks = arcpy.sa.StreamLink("streamnet",FlowDirFile) 26 | StreamLinks.save("streamlinks") 27 | StreamLinks_shp = "streamnet.shp" 28 | arcpy.sa.StreamToFeature("streamlinks",FlowDirFile,StreamLinks_shp,"NO_SIMPLIFY") 29 | StreamOrder = arcpy.sa.StreamOrder("streamnet",FlowDirFile,"STRAHLER") 30 | StreamOrder.save("streamorder") 31 | StreamOrderFile = folder + os.sep + "StreamOrder.shp" 32 | arcpy.sa.StreamToFeature("streamorder",FlowDirFile,StreamOrderFile,"NO_SIMPLIFY") 33 | Watershed = arcpy.sa.Watershed(FlowDirFile,"streamlinks","VALUE") 34 | Watershed.save("watershed") 35 | arcpy.RasterToPolygon_conversion("watershed","Watershed.shp","NO_SIMPLIFY","VALUE") 36 | WatershedFile = folder + os.sep + "watershed" 37 | StreamFile = folder + os.sep + "streamlinks" 38 | return (StreamFile,StreamOrderFile,WatershedFile) 39 | def RillIndexCalc(StreamOrderFile,DEMbuf,tempDir,StatsDir): 40 | print "Calculating rill indexes..." 41 | #input StreamOrderFile and DEMbuf,output CSV files. 
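Note: the direction-specific tests in IdentifyRillShoulderPts above appear to have lost a comparison operator during extraction (e.g. "slope[i][j]alpha"); judging from the parenthesis balance, the intended condition is probably slope above alpha, an aspect-direction (downslope) minus opposite (upslope) slope difference above beta, and negative (convex) profile curvature. A minimal sketch of that test under this assumption, collapsing the eight aspect octants into one lookup (the helper names are illustrative, not part of RillPy):

# Assumed reading of the garbled condition: slope > alpha, downslope-minus-upslope
# slope difference > beta, and profile curvature < 0. Inputs are numpy arrays as
# returned by ReadRaster in Util.py.
# (drow, dcol) of the downslope neighbour for octants N, NE, E, SE, S, SW, W, NW.
OCTANT_OFFSETS = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]

def octant_index(aspect_deg):
    # Map aspect in degrees (0 = North, clockwise) to one of the 8 octants.
    return int(((aspect_deg + 22.5) % 360) // 45)

def is_shoulder_cell(slope, profc, aspect_deg, i, j, alpha, beta):
    # Return True if cell (i, j) passes the assumed shoulder-point test.
    di, dj = OCTANT_OFFSETS[octant_index(aspect_deg)]
    nrows, ncols = slope.shape
    i_dn, j_dn = i + di, j + dj   # neighbour in the aspect (downslope) direction
    i_up, j_up = i - di, j - dj   # opposite (upslope) neighbour
    if not (0 <= i_dn < nrows and 0 <= i_up < nrows and
            0 <= j_dn < ncols and 0 <= j_up < ncols):
        return False
    return (slope[i][j] > alpha and
            slope[i_dn][j_dn] - slope[i_up][j_up] > beta and
            profc[i][j] < 0)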
42 | env.workspace = tempDir 43 | arcpy.gp.overwriteOutput = 1 44 | arcpy.CheckOutExtension("Spatial") 45 | dem_des = arcpy.gp.describe(DEMbuf) 46 | env.extent = dem_des.Extent 47 | arcpy.FeatureVerticesToPoints_management(StreamOrderFile,"StreamNDsStart.shp","START") 48 | arcpy.FeatureVerticesToPoints_management(StreamOrderFile,"StreamNDsEnd.shp","END") 49 | arcpy.AddXY_management("StreamNDsStart.shp") 50 | arcpy.AddXY_management("StreamNDsEnd.shp") 51 | arcpy.sa.ExtractValuesToPoints("StreamNDsStart.shp",DEMbuf,"StreamNDsElevStart.shp","NONE", "VALUE_ONLY") 52 | arcpy.sa.ExtractValuesToPoints("StreamNDsEnd.shp",DEMbuf,"StreamNDsElevEnd.shp","NONE", "VALUE_ONLY") 53 | 54 | def GenerateWatershedByStream(StreamFile,FlowDirFile, tempDir, WatershedFile): 55 | print "Regenerating watershed by real rill network..." 56 | arcpy.CheckOutExtension("spatial") 57 | arcpy.gp.overwriteOutput = 1 58 | 59 | tempStream = tempDir + os.sep + "StmNet" 60 | arcpy.ASCIIToRaster_conversion(StreamFile, tempStream,"INTEGER") 61 | Watershed = arcpy.sa.Watershed(FlowDirFile,tempStream,"VALUE") 62 | tempWtshd = tempDir + os.sep + "WtShd" 63 | Watershed.save(tempWtshd) 64 | GRID2ASC(tempWtshd,WatershedFile) 65 | 66 | def isEdge(raster,row,col,nodata): 67 | nrows,ncols = raster.shape 68 | if (row == 0 or row == nrows-1 or col == 0 or col == ncols-1) and raster[row][col] != nodata: 69 | return True 70 | elif raster[row][col] == nodata: 71 | return False 72 | else: 73 | count = 0 74 | for di in [-1,0,1]: 75 | for dj in [-1,0,1]: 76 | ni = row + di 77 | nj = col + dj 78 | if raster[ni][nj] == nodata: 79 | count = count + 1 80 | if count > 0: 81 | return True 82 | else: 83 | return False 84 | 85 | def ExtractBasinBoundary(Basin,basinID,BasinBoundary): 86 | basin = ReadRaster(Basin).data 87 | nodata = ReadRaster(Basin).noDataValue 88 | #print nodata 89 | geotrans = ReadRaster(Basin).geotrans 90 | nrows,ncols = basin.shape 91 | Boundary = numpy.ones((nrows,ncols)) 92 | if nodata != -9999: 93 | Boundary = Boundary * -9999 94 | else: 95 | Boundary = Boundary * nodata 96 | 97 | for i in range(nrows): 98 | for j in range(ncols): 99 | if basin[i][j] in basinID: 100 | #count = count + 1 101 | basin[i][j] = 1 102 | else: 103 | basin[i][j] = nodata 104 | for i in range(nrows): 105 | for j in range(ncols): 106 | if isEdge(basin,i,j,nodata): 107 | Boundary[i][j] = 1 108 | WriteAscFile(BasinBoundary, Boundary,ncols,nrows,geotrans,-9999) 109 | -------------------------------------------------------------------------------- /RillPy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/RillPy/__init__.py -------------------------------------------------------------------------------- /RillPy/main.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """ 3 | @Created : 2015-1-6 4 | @Revised : 2015-1-28 Divided into seperate files for better version control 5 | 6 | @author : Liangjun Zhu 7 | @summary : Delineating and Extracting hillslopes and real rill from DEM. 8 | @param : DEMsrc, rootdir, streamTHR 9 | @requires : ArcGIS 10.x, gdal, Scipy 10 | @references: Detail information will be found in README.txt. 
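ExtractBasinBoundary above marks boundary cells with a per-cell isEdge scan. The same result (basin cells that touch at least one cell outside the selected basins, plus cells on the grid edge) can be obtained with a morphological erosion; a small sketch assuming scipy.ndimage is available, offered only as an alternative illustration, not as RillPy code:

import numpy as np
from scipy import ndimage

def basin_boundary(basin, basin_ids, nodata=-9999):
    # Mark the outer edge of cells whose value is in basin_ids with 1,
    # everything else with nodata.
    mask = np.isin(basin, basin_ids)
    # Erode with a 3x3 structuring element; border_value=0 makes grid-edge
    # cells part of the boundary, like the explicit edge test in isEdge.
    eroded = ndimage.binary_erosion(mask, structure=np.ones((3, 3)), border_value=0)
    boundary = np.full(basin.shape, float(nodata))
    boundary[mask & ~eroded] = 1
    return boundary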
11 | @contract : zlj@lreis.ac.cn 12 | """ 13 | import os 14 | import Util 15 | import Subbasin 16 | import Hillslope 17 | import Rill 18 | import ShoulderLine 19 | 20 | if __name__ == '__main__': 21 | ## Input params 22 | DEMsrc = r'E:\MasterBNU\RillMorphology\test\testdem' 23 | rootdir = r'E:\MasterBNU\RillMorphology\20150130' 24 | streamTHR = 0.01 25 | 26 | ## Run algorithms 27 | tempDir,PreprocessDir,RillExtDir,StatsDir = Util.makeResultFolders(rootdir) 28 | # DEMbuf,DEMfil,SlopeFile,SOSFile,AspectFile,FlowDirFile,FlowAccFile,CurvProfFile,CurvPlanFile = Util.UtilHydroFiles(DEMsrc, PreprocessDir) 29 | # StreamFile,StreamOrderFile,WatershedFile = Subbasin.GenerateStreamNetByTHR(DEMbuf,FlowDirFile,FlowAccFile,streamTHR,tempDir) 30 | # Subbasin.RillIndexCalc(StreamOrderFile,DEMbuf,tempDir,StatsDir) 31 | 32 | HillslpFile = RillExtDir + os.sep + "HillSlp.asc" 33 | #Hillslope.DelineateHillslopes(StreamFile,FlowDirFile,HillslpFile) 34 | 35 | DEMfil = PreprocessDir + os.sep + "DEMfil" 36 | StreamFile = tempDir + os.sep + "StreamLinks" 37 | WatershedFile = tempDir + os.sep + "watershed" 38 | AspectFile = PreprocessDir + os.sep + "aspect" 39 | SlopeFile = PreprocessDir + os.sep + "slope" 40 | SOSFile = PreprocessDir + os.sep + "sos" 41 | CurvProfFile = PreprocessDir + os.sep + "curvprof" 42 | FlowDirFile = PreprocessDir + os.sep + "flowdir" 43 | FlowAccFile = PreprocessDir + os.sep + "flowacc" 44 | UpStreamRouteFile = RillExtDir + os.sep + "UpstreamRoute.txt" 45 | UpStreamRouteShp = RillExtDir + os.sep + "UpstreamRoute.shp" 46 | ShoulderptsFile = RillExtDir + os.sep + "Shoulderpts.asc" 47 | RealrillFile1 = RillExtDir + os.sep + "Realrill1.asc" 48 | RealrillFile2 = RillExtDir + os.sep + "Realrill2.asc" 49 | RillEdgeFile = RillExtDir + os.sep + "RealEdge.asc" 50 | RealRillFinal = RillExtDir + os.sep + "RealRill.asc" 51 | RillStFile = RillExtDir + os.sep + "RealRillFinal.asc" 52 | OrderStFile = RillExtDir + os.sep + "RillOrderFinal.asc" 53 | FinalWtdFile = RillExtDir + os.sep + "WatershedFinal.asc" 54 | HillslpFinalFile = RillExtDir + os.sep + "HillslpFinal.asc" 55 | UpStreamRouteFinalFile = RillExtDir + os.sep + "UpstreamRouteFinal.txt" 56 | UpStreamRouteFinalShp = RillExtDir + os.sep + "UpstreamRouteFinal.shp" 57 | ShoulderptsFinalFile = RillExtDir + os.sep + "ShoulderptsFinal.asc" 58 | RealrillFile1Final = RillExtDir + os.sep + "Realrill1final.asc" 59 | #Rill.UpStreamRoute(DEMfil,WatershedFile,HillslpFile,StreamFile,FlowDirFile,RillExtDir,UpStreamRouteFile,UpStreamRouteShp) 60 | #Rill.Shoulderpts(UpStreamRouteFile,DEMfil,SlopeFile,SOSFile,RillExtDir,ShoulderptsFile,RealrillFile1) 61 | #Rill.IdentifyRillRidges(HillslpFile,StreamFile,FlowDirFile,FlowAccFile,WatershedFile,DEMfil,RealrillFile2,RillEdgeFile) 62 | #Rill.RelinkRealRill(RealrillFile1,RealrillFile2,StreamFile,FlowDirFile,RealRillFinal) 63 | #Rill.SimplifyByRillOrder(RealRillFinal,FlowDirFile,tempDir,5,RillStFile,OrderStFile) 64 | #Subbasin.GenerateWatershedByStream(RillStFile,FlowDirFile, tempDir, FinalWtdFile) 65 | #Hillslope.DelineateHillslopes(RillStFile,FlowDirFile,HillslpFinalFile) 66 | #Rill.UpStreamRoute(DEMfil,FinalWtdFile,HillslpFinalFile,RillStFile,FlowDirFile,RillExtDir,UpStreamRouteFinalFile,UpStreamRouteFinalShp) 67 | Rill.Shoulderpts(UpStreamRouteFinalFile,DEMfil,SlopeFile,SOSFile,RillExtDir,ShoulderptsFinalFile,RealrillFile1Final) 68 | 69 | 70 | #alpha = 25 71 | #beta = 5 72 | #ShoulderPtsOrig = RillExtDir + os.sep + "ShoulderPtsOrig.asc" 73 | 
#ShoulderLine.IdentifyRillShoulderPts(AspectFile,SlopeFile,CurvProfFile,alpha,beta,ShoulderPtsOrig) 74 | #num = 50 75 | #ShoulderPts = RillExtDir + os.sep + "ShoulderPts.asc" 76 | #Util.RemoveLessPts(ShoulderPtsOrig,num,ShoulderPts) 77 | #Basin = PreprocessDir + os.sep + "basin" 78 | #Watershed = tempDir + os.sep + "watershed" 79 | #basinID = [1,4,25,26] 80 | #BasinBoundary = PreprocessDir + os.sep + "basinBounday.asc" 81 | #Subbasin.ExtractBasinBoundary(Basin,basinID,BasinBoundary) 82 | #Shoulder = RillExtDir + os.sep + "Shoulder.asc" 83 | #ShoulderLine.RillShoulderSegement(BasinBoundary,FlowDirFile,ShoulderPts,Shoulder) 84 | #ShoulderLine.RillShoulder(Watershed,FlowDirFile,ShoulderPts,tempDir,Shoulder) -------------------------------------------------------------------------------- /SWAT_post_process/Read_SWAT_Output_MDB.py: -------------------------------------------------------------------------------- 1 | import pyodbc 2 | import sys, os 3 | 4 | 5 | def readTable(mdbfile, tableName, findField, findValue, csvfile): 6 | odbc_conn_str = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;UID=;PWD=;' % mdbfile 7 | # print odbc_conn_str 8 | conn = pyodbc.connect(odbc_conn_str) 9 | cursor = conn.cursor() 10 | field_sel = ["YEAR", "MON", "FLOW_OUTcms", "SED_OUTtons", "NO3_OUTkg", "NH4_OUTkg", "NO2_OUTkg", 11 | "TOT_Nkg", "TOT_Pkg", "MINP_OUTkg", "ORGP_OUTkg"] 12 | field_sel_idx = [] 13 | fields = [] 14 | fields_str = '' 15 | for row in cursor.columns(table=tableName): 16 | fields.append(row.column_name) 17 | for field in field_sel: 18 | if field in fields: 19 | field_sel_idx.append(fields.index(field)) 20 | fields_str += field 21 | fields_str += ',' 22 | # print fields_str 23 | query = "SELECT * FROM %s WHERE %s=%s" % (tableName, findField, findValue) 24 | # print query 25 | cursor.execute(query) 26 | rows = cursor.fetchall() 27 | f = open(csv_file, 'w') 28 | f.write(fields_str) 29 | f.write('\n') 30 | for row in rows: 31 | row_str = '' 32 | for i in field_sel_idx: 33 | row_str += str(row[i]) 34 | row_str += "," 35 | # print row 36 | # print row_str 37 | f.write(row_str) 38 | f.write('\n') 39 | f.close() 40 | 41 | 42 | def currentPath(): 43 | path = sys.path[0] 44 | if os.path.isdir(path): 45 | return path 46 | elif os.path.isfile(path): 47 | return os.path.dirname(path) 48 | 49 | 50 | if __name__ == '__main__': 51 | path = currentPath() 52 | # SWAT_output_mdb_file = r'E:\data_m\QSWAT_projects\ZhongTianShe2\zts2\Scenarios\sim8\TablesOut\SWATOutput.mdb' 53 | # csv_file = r'E:\data_m\QSWAT_projects\ZhongTianShe2\zts2\Scenarios\sim8\TablesOut\rch.csv' 54 | SWAT_output_mdb_file = path + os.sep + "SWATOutput.mdb" 55 | # csv_file = path + os.sep + "rch.csv" 56 | # readTable(SWAT_output_mdb_file, "rch", "SUB", 11, csv_file) 57 | ## the following code can export all reaches 58 | subbsnNum = 15 59 | for i in range(1, subbsnNum + 1): 60 | csv_file = path + os.sep + "rch%s.csv" % str(i) 61 | readTable(SWAT_output_mdb_file, "rch", "SUB", i, csv_file) 62 | -------------------------------------------------------------------------------- /SWAT_post_process/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/SWAT_post_process/__init__.py -------------------------------------------------------------------------------- /SWAT_post_process/stats_SWAT_Output_mdb.py: -------------------------------------------------------------------------------- 1 | import pyodbc 2 | import 
sys, os 3 | import numpy 4 | 5 | 6 | def statsOutput(mdbfile, tableName, findField, findValue, years, fieldSel, csvfile): 7 | odbc_conn_str = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;UID=;PWD=;' % mdbfile 8 | # print odbc_conn_str 9 | conn = pyodbc.connect(odbc_conn_str) 10 | cursor = conn.cursor() 11 | field_sel_idx = [] 12 | fields = [] 13 | fields_str = '' 14 | for row in cursor.columns(table=tableName): 15 | fields.append(row.column_name) 16 | for field in fieldSel: 17 | if field in fields: 18 | field_sel_idx.append(fields.index(field)) 19 | fields_str += field 20 | fields_str += ',' 21 | # print fields_str 22 | query = "SELECT * FROM %s WHERE %s=%s" % (tableName, findField, findValue) 23 | # print query 24 | cursor.execute(query) 25 | rows = cursor.fetchall() 26 | f = open(csv_file, 'w') 27 | f.write(fields_str) 28 | f.write('\n') 29 | for row in rows: 30 | row_str = '' 31 | for i in field_sel_idx: 32 | row_str += str(row[i]) 33 | row_str += "," 34 | # print row 35 | # print row_str 36 | f.write(row_str) 37 | f.write('\n') 38 | f.close() 39 | 40 | 41 | def currentPath(): 42 | path = sys.path[0] 43 | if os.path.isdir(path): 44 | return path 45 | elif os.path.isfile(path): 46 | return os.path.dirname(path) 47 | 48 | 49 | if __name__ == '__main__': 50 | SWAT_output_mdb_file = r'E:\data_m\QSWAT_projects\Done\baseSim_unCali\baseSim_unCali\Scenarios\Default\TablesOut\SWATOutput.mdb' 51 | csv_file = r'E:\data_m\QSWAT_projects\Done\baseSim_unCali\baseSim_unCali\Scenarios\Default\TablesOut\rch_stats.csv' 52 | # path = currentPath() 53 | # SWAT_output_mdb_file = path + os.sep + "SWATOutput.mdb" 54 | # csv_file = path + os.sep + "rch_stats.csv" 55 | 56 | field_sel = ["FLOW_OUTcms", "SED_OUTtons", "NO3_OUTkg", "NH4_OUTkg", "NO2_OUTkg", "TOT_Nkg", 57 | "TOT_Pkg", "MINP_OUTkg", "ORGP_OUTkg"] 58 | year_sel = [2014] 59 | subbsnNum = 15 60 | statsOutput(SWAT_output_mdb_file, 'rch', 'SUB', subbsnNum, year_sel, field_sel, csv_file) 61 | -------------------------------------------------------------------------------- /SWATplusUtility/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/SWATplusUtility/__init__.py -------------------------------------------------------------------------------- /TIN_Hydro/User manual-zhulj-2016-2-20.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/User manual-zhulj-2016-2-20.docx -------------------------------------------------------------------------------- /TIN_Hydro/XYZ2ShpPoint_GDAL.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | ## @Generate ESRI Shapefile from XYZ point text file. 3 | ## @author: Liang-Jun Zhu 4 | ## @Date: 2016-6-17 5 | ## @Email: zlj@lreis.ac.cn 6 | # 7 | import os,sys,time 8 | from osgeo import ogr 9 | def currentPath(): 10 | path = sys.path[0] 11 | if os.path.isdir(path): 12 | return path 13 | elif os.path.isfile(path): 14 | return os.path.dirname(path) 15 | def WritePointShp(vertexList,zFieldName,outShp): 16 | print "Write point shapefile: %s" % outShp 17 | driver = ogr.GetDriverByName("ESRI Shapefile") 18 | if driver is None: 19 | print "ESRI Shapefile driver not available." 
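Both Read_SWAT_Output_MDB.py and stats_SWAT_Output_mdb.py above open the Access database with pyodbc and assemble the WHERE clause and CSV rows by string concatenation. A shorter sketch of the same pattern with a parameterised query and the csv module (the helper and its defaults are illustrative only; the table and field names are the ones used by the scripts):

import csv
import pyodbc

def export_reach(mdbfile, sub_id, csvfile, fields=("YEAR", "MON", "FLOW_OUTcms")):
    conn_str = ('DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
                'DBQ=%s;UID=;PWD=;' % mdbfile)
    conn = pyodbc.connect(conn_str)
    cursor = conn.cursor()
    # "?" placeholders let pyodbc bind the value instead of formatting it into the SQL.
    cursor.execute("SELECT %s FROM rch WHERE SUB = ?" % ", ".join(fields), sub_id)
    with open(csvfile, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(fields)
        for row in cursor.fetchall():
            writer.writerow(list(row))
    conn.close()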
20 | sys.exit(1) 21 | if os.path.exists(outShp): 22 | driver.DeleteDataSource(outShp) 23 | ds = driver.CreateDataSource(outShp.rpartition(os.sep)[0]) 24 | if ds is None: 25 | print "ERROR Output: Creation of output file failed." 26 | sys.exit(1) 27 | lyr = ds.CreateLayer(outShp.rpartition(os.sep)[2].split('.')[0],None,ogr.wkbPoint) 28 | zField = ogr.FieldDefn(zFieldName,ogr.OFTReal) 29 | lyr.CreateField(zField) 30 | 31 | #vertexGeo = ogr.Geometry(ogr.wkbMultiPoint) 32 | for vertex in vertexList: 33 | vertexGeo = ogr.Geometry(ogr.wkbPoint) 34 | vertexGeo.AddPoint(vertex[0],vertex[1]) 35 | featureDefn = lyr.GetLayerDefn() 36 | vertexFeature = ogr.Feature(featureDefn) 37 | vertexFeature.SetGeometry(vertexGeo) 38 | vertexFeature.SetField(zFieldName, vertex[2]) 39 | lyr.CreateFeature(vertexFeature) 40 | vertexFeature.Destroy() 41 | ds.Destroy() 42 | def progress(percent): 43 | bar_length=20 44 | hashes = '#' * int(percent/100.0 * bar_length) 45 | spaces = ' ' * (bar_length - len(hashes)) 46 | sys.stdout.write(" Handling: [%s] %.1f%%\n"%(hashes + spaces, percent)) 47 | sys.stdout.flush() 48 | #time.sleep(1) 49 | 50 | def GeneratorPointShp(txtFile,outShp): 51 | start = time.time() 52 | lineCount = 0 53 | thefile = open(txtFile,'rb') 54 | while True: 55 | buffer = thefile.read(1024 * 8192) 56 | if not buffer: 57 | break 58 | lineCount += buffer.count('\n') 59 | thefile.close() 60 | print "There are %d points to be processed." % lineCount 61 | 62 | ## Create shapefile 63 | driver = ogr.GetDriverByName("ESRI Shapefile") 64 | if driver is None: 65 | print "ESRI Shapefile driver not available." 66 | sys.exit(1) 67 | if os.path.exists(outShp): 68 | driver.DeleteDataSource(outShp) 69 | ds = driver.CreateDataSource(outShp.rpartition(os.sep)[0]) 70 | if ds is None: 71 | print "ERROR Output: Creation of output file failed." 72 | sys.exit(1) 73 | lyr = ds.CreateLayer(outShp.rpartition(os.sep)[2].split('.')[0],None,ogr.wkbPoint) 74 | xField = ogr.FieldDefn("X",ogr.OFTReal) 75 | yField = ogr.FieldDefn("Y",ogr.OFTReal) 76 | zField = ogr.FieldDefn("Z",ogr.OFTReal) 77 | lyr.CreateField(xField) 78 | lyr.CreateField(yField) 79 | lyr.CreateField(zField) 80 | 81 | count = 0 82 | with open(txtFile) as f: 83 | for line in f: 84 | pts = line.split(',') 85 | if pts !=[] and len(pts) == 4: 86 | x = float(pts[1]) 87 | y = float(pts[2]) 88 | z = float(pts[3]) 89 | #print x,y,z 90 | 91 | vertexGeo = ogr.Geometry(ogr.wkbPoint) 92 | vertexGeo.AddPoint(x,y) 93 | featureDefn = lyr.GetLayerDefn() 94 | vertexFeature = ogr.Feature(featureDefn) 95 | vertexFeature.SetGeometry(vertexGeo) 96 | vertexFeature.SetField("X", x) 97 | vertexFeature.SetField("Y", y) 98 | vertexFeature.SetField("Z", z) 99 | lyr.CreateFeature(vertexFeature) 100 | vertexFeature.Destroy() 101 | 102 | count += 1 103 | perc = float(count)/float(lineCount) * 100 104 | if(perc%5. == 0.): 105 | progress(perc) 106 | ds.Destroy() 107 | end = time.time() 108 | secs = end - start 109 | mins = secs / 60. 110 | print "\nAll done, costs %.1f minutes!" 
% mins 111 | if __name__ == '__main__': 112 | currFolder = currentPath() 113 | currFolder = r'e:/test/test' 114 | filename = "xyz.txt" 115 | outfilename = "test.shp" 116 | 117 | xyzTxtFile = currFolder + os.sep + filename 118 | shpFile = currFolder + os.sep + outfilename 119 | GeneratorPointShp(xyzTxtFile, shpFile) -------------------------------------------------------------------------------- /TIN_Hydro/_project: -------------------------------------------------------------------------------- 1 | [default] 2 | projectname = python, 3 | 4 | -------------------------------------------------------------------------------- /TIN_Hydro/backup/CGAL-test.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from CGAL.Triangulations_2 import * 3 | from CGAL.Triangulations_3 import * 4 | from CGAL.Kernel import Point_3 5 | from CGAL.Kernel import Point_2 6 | from osgeo import ogr 7 | from gdalconst import * 8 | import os,sys 9 | from ShapefileIO import * 10 | 11 | pts2DList = [[1.1,2.1],[0.8,5],[5.2,1.9],[3.5,4.9],[6,7.4],[0.3,8],[-2,5.0]] 12 | print pts2DList 13 | dt = Delaunay_triangulation_2() 14 | 15 | for pt in pts2DList: 16 | dt.insert(Point_2(pt[0],pt[1])) 17 | #print "number of tin: %d" % dt.number_of_faces() 18 | TriangleList = [] 19 | TriangleVertexList = [] 20 | TriangleNbrIdxList = [] 21 | VertexList = [] 22 | TriangleVertexListASC = [] 23 | #for v in dt.vetices: 24 | # VertexList.append([v.point()[0],v.point()[1]]) 25 | #print VertexList 26 | for f in dt.faces: 27 | temppts = [] 28 | tempPtsIdx = [] 29 | for i in range(3): 30 | tempp = f.vertex(i).point() 31 | temppts.append([tempp[0],tempp[1]]) 32 | tempPtsIdx.append(pts2DList.index([tempp[0],tempp[1]])) 33 | TriangleList.append(temppts) 34 | TriangleVertexList.append(tempPtsIdx) 35 | TriangleVertexListASC.append(sorted(tempPtsIdx)) 36 | print TriangleVertexList 37 | for f in dt.faces: 38 | NbrFaceIdx = [] 39 | for i in range(3): 40 | tempFaceIdx = [] 41 | if dt.is_infinite(f.neighbor(i)) == False: 42 | for j in range(3): 43 | tempFaceIdx.append(pts2DList.index([f.neighbor(i).vertex(j).point()[0],f.neighbor(i).vertex(j).point()[1]])) 44 | NbrFaceIdx.append(TriangleVertexList.index(tempFaceIdx)) 45 | else: 46 | NbrFaceIdx.append(None) 47 | value = NbrFaceIdx.pop(2) 48 | NbrFaceIdx.insert(0,value) 49 | TriangleNbrIdxList.append(NbrFaceIdx) 50 | print TriangleNbrIdxList 51 | # 52 | #Shp = r'E:\research\TIN-based\testtin.shp' 53 | ##WritePolyonShp(TriangleList,Shp) 54 | VertexTriangleList = [] 55 | for v in dt.vertices: 56 | #print v.point() 57 | cir_faces = dt.incident_faces(v) 58 | finites_faces = [] 59 | f1 = cir_faces.next() 60 | if dt.is_infinite(f1) == False: 61 | finites_faces.append(f1) 62 | for f in cir_faces: 63 | if f == f1: 64 | break 65 | else: 66 | if dt.is_infinite(f) == False: 67 | finites_faces.append(f) 68 | finites_faces_idx = [] 69 | for f in finites_faces: 70 | tempFaceIdx = [] 71 | for i in range(3): 72 | tempFaceIdx.append(pts2DList.index([f.vertex(i).point()[0],f.vertex(i).point()[1]])) 73 | finites_faces_idx.append(TriangleVertexList.index(tempFaceIdx)) 74 | VertexTriangleList.append(finites_faces_idx) 75 | print VertexTriangleList 76 | 77 | 78 | #for f in dt.faces: 79 | # tempNeighborIdx = [] 80 | # for i in range(3): 81 | # neighborFace = f.neighbor(i) 82 | # for j in range(3): 83 | # neighborFace.vertex(j).Point 84 | # neighborFace 85 | # tempNeighborIdx.append() 86 | # 87 | # print TriangleVertexList.index(tempNeighborIdx) 88 | 89 | 90 | #ptsShp = 
r'E:\research\TIN-based\Points_Elev.shp' 91 | #elevField = "ELEV" 92 | #tinShp = r'E:\research\TIN-based\tin.shp' 93 | #tin3DShp = r'E:\research\TIN-based\tin3D.shp' 94 | #if not ptsShp.endswith(".shp"): 95 | # print "Error Input: Please input an shapefile!" 96 | # sys.exit(1) 97 | #ptsData = ogr.Open(ptsShp) 98 | #pts3DList = [] 99 | #pts2DList = [] 100 | #dt = Delaunay_triangulation_2() 101 | #dt3 = Delaunay_triangulation_3() 102 | #if ptsData is None: 103 | # print "Error occurs when trying to open %s!" % ptsShp 104 | # sys.exit(1) 105 | #else: 106 | # lyr = ptsData.GetLayerByIndex(0) 107 | # if lyr.GetGeomType() != 1: 108 | # print "Error Input: Please input an point shapefile!" 109 | # sys.exit(1) 110 | # hasElev = False 111 | # for field in lyr.schema: 112 | # if field.GetName() == elevField: 113 | # hasElev = True 114 | # if not hasElev: 115 | # print "Error Input: No field matches %s" % elevField 116 | # sys.exit(1) 117 | # lyr.ResetReading() 118 | # for feat in lyr: 119 | # geom = feat.GetGeometryRef() 120 | # if geom is not None and geom.GetGeometryType() == ogr.wkbPoint: 121 | # x = geom.GetX() 122 | # y = geom.GetY() 123 | # z = float(feat.GetField(feat.GetFieldIndex(elevField))) 124 | # pts3DList.append(Point_3(x,y,z)) 125 | # pts2DList.append(Point_2(x,y)) 126 | #ptsData = None 127 | ##print len(ptsList) 128 | #for p in pts2DList: 129 | # dt.insert(p) 130 | #for p in pts3DList: 131 | # dt3.insert(p) 132 | #print "2D Triangulation Numbers: %d" % dt.number_of_faces() 133 | #print "3D Triangulation Numbers: %d" % dt3.number_of_facets() 134 | ## write shapefile 135 | #driver = ogr.GetDriverByName("ESRI Shapefile") 136 | #if driver is None: 137 | # print "ESRI Shapefile driver not available." 138 | # sys.exit(1) 139 | #if os.path.exists(tinShp): 140 | # driver.DeleteDataSource(tinShp) 141 | #ds = driver.CreateDataSource(tinShp.rpartition(os.sep)[0]) 142 | #if ds is None: 143 | # print "ERROR Output: Creation of output file failed." 
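The CGAL-Python loops above assemble TriangleVertexList, TriangleNbrIdxList and VertexTriangleList by searching point and face indices. Purely as a point of reference (the package itself builds on CGAL-Python, not scipy), the same three structures can be read almost directly from scipy.spatial.Delaunay, shown here with the sample points from the top of CGAL-test.py; note the incident-triangle lists come out unordered, unlike the counterclockwise circulator used above.

import numpy as np
from scipy.spatial import Delaunay

pts = np.array([[1.1, 2.1], [0.8, 5], [5.2, 1.9], [3.5, 4.9],
                [6, 7.4], [0.3, 8], [-2, 5.0]])
tri = Delaunay(pts)

triangle_vertex_list = tri.simplices.tolist()          # vertex indices per triangle
# -1 in tri.neighbors means "no neighbour" (convex-hull edge), i.e. None above.
triangle_nbr_idx_list = [[n if n != -1 else None for n in nbrs]
                         for nbrs in tri.neighbors.tolist()]
vertex_triangle_list = [[] for _ in range(len(pts))]   # incident triangles per vertex
for t, verts in enumerate(triangle_vertex_list):
    for v in verts:
        vertex_triangle_list[v].append(t)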
144 | # sys.exit(1) 145 | #lyr = ds.CreateLayer("tin",None,ogr.wkbPolygon) 146 | # 147 | #for f in dt.faces: 148 | # tempPts = [] 149 | # tri = ogr.Geometry(ogr.wkbLinearRing) 150 | # for i in range(3): 151 | # tempp = f.vertex(i).point() 152 | # tri.AddPoint(tempp[0],tempp[1]) 153 | # tempPts.append(tempp) 154 | # #print "x=%f,y=%f" % (tempp[0],tempp[1]) 155 | # tri.AddPoint(tempPts[0][0],tempPts[0][1]) 156 | # tinpoly = ogr.Geometry(ogr.wkbPolygon) 157 | # tinpoly.AddGeometry(tri) 158 | # tempTri = ogr.CreateGeometryFromJson(tinpoly.ExportToJson()) 159 | # feature = ogr.Feature(lyr.GetLayerDefn()) 160 | # feature.SetGeometry(tempTri) 161 | # lyr.CreateFeature(feature) 162 | # feature.Destroy() 163 | #ds.Destroy() 164 | -------------------------------------------------------------------------------- /TIN_Hydro/backup/fit.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import scipy 4 | import numpy 5 | import xalglib 6 | if __name__ == '__main__': 7 | fit3Pt = [[719532.147, 21198.921, 3162.84], [719541.137, 21204.959, 3162.84],[719532.008, 21208.555, 3162.84]] 8 | fit3NbrPts = [[[719542.893, 21196.805, 3161.92], [719532.147, 21198.921, 3162.84], [719527.028, 21206.985, 3164.02], [719541.137, 21204.959, 3162.84], [719532.008, 21208.555, 3162.84], [719520.537, 21196.113, 3170.23], [719536.501, 21186.141, 3170.06]], [[719542.893, 21196.805, 3161.92], [719532.147, 21198.921, 3162.84], [719558.451, 21209.343, 3177.76], [719544.202, 21212.663, 3174.22], [719541.137, 21204.959, 3162.84], [719532.008, 21208.555, 3162.84]], [[719532.147, 21198.921, 3162.84], [719527.028, 21206.985, 3164.02], [719524.651, 21211.249, 3164.02], [719544.202, 21212.663, 3174.22], [719541.137, 21204.959, 3162.84], [719532.008, 21208.555, 3162.84], [719530.92, 21218.239, 3175.19]]] 9 | 10 | -------------------------------------------------------------------------------- /TIN_Hydro/backup/test.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import os,sys 3 | from ShapefileIO import * 4 | from TINcreator import * 5 | 6 | def findIntersectIdx(v,v0,A,B,C): ## v is three vertexes of a triangle, v0 is a point in the steepest descent vector 7 | ### the basic idea is OC=xOA+yOB, when x>0 and y>0, then OC is between OA and OB 8 | ## intersect is the first point's index of the intersected edges 9 | for i in range(3): 10 | ## OA, OB vector 11 | o1 = [v[i][0]-v0[0],v[i][1]-v0[1]] 12 | o2 = [v[(i+1)%3][0]-v0[0],v[(i+1)%3][1]-v0[1]] 13 | ## if OA and OB are collinear? 14 | k = o1[1]*o2[0]-o1[0]*o2[1] 15 | if k == 0: 16 | ## if deepest flow path vector (A/C, B/C) is also collinear? 
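The commented-out block just above writes every Delaunay face to a polygon shapefile with OGR. A compact, self-contained version of that pattern for reference (the layer name "tin" follows the snippet; the function itself is a sketch, not the shipped WritePolyonShp from ShapefileIO.py):

import os
from osgeo import ogr

def write_triangle_shp(triangles, out_shp):
    # triangles: list of [(x, y), (x, y), (x, y)] vertex tuples.
    driver = ogr.GetDriverByName("ESRI Shapefile")
    if os.path.exists(out_shp):
        driver.DeleteDataSource(out_shp)
    ds = driver.CreateDataSource(out_shp)
    lyr = ds.CreateLayer("tin", None, ogr.wkbPolygon)
    for tri_pts in triangles:
        ring = ogr.Geometry(ogr.wkbLinearRing)
        for x, y in tri_pts:
            ring.AddPoint(x, y)
        ring.CloseRings()                    # close the ring back to its first vertex
        poly = ogr.Geometry(ogr.wkbPolygon)
        poly.AddGeometry(ring)
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeometry(poly)
        lyr.CreateFeature(feat)
        feat = None
    ds = None                                # flush and close the datasource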
17 | k1 = o1[1]*A/C - o1[0]*B/C 18 | k2 = o2[1]*A/C - o2[0]*B/C 19 | if k1==0 and k2==0: 20 | intersect = i 21 | else: 22 | m = (o2[0]*B/C-o2[1]*A/C) / k 23 | if o2[0] != 0: 24 | n = (A/C-m*o1[0])/o2[0] 25 | else: 26 | n = (B/C-m*o1[1])/o2[1] 27 | if m > 0 and n > 0: 28 | intersect = i 29 | return intersect 30 | def tranglePlane(p1,p2,p3): 31 | A = p1[1]*(p2[2]-p3[2])+p2[1]*(p3[2]-p1[2])+p3[1]*(p1[2]-p2[2]) ## A = y1(z2-z3)+y2(Z3-Z1)+y3(Z1-Z2) 32 | B = p1[2]*(p2[0]-p3[0])+p2[2]*(p3[0]-p1[0])+p3[2]*(p1[0]-p2[0]) ## B = Z1(x2-x3)+z2(x3-x1)+z3(x1-x2) 33 | C = p1[0]*(p2[1]-p3[1])+p2[0]*(p3[1]-p1[1])+p3[0]*(p1[1]-p2[1]) ## C = x1(y2-y3)+x2(y3-y1)+x3(y1-y2) 34 | D = -1*A*p1[0]-B*p1[1]-C*p1[2] ## D = -Ax1-By1-Cz1 35 | #print A,B,C,D 36 | return (float(A),float(B),float(C),float(D)) 37 | 38 | if __name__ == '__main__': 39 | #### INPUT #### 40 | ptsShp = r'E:\research\TIN-based\20150811\flat_triangle_pts.shp' 41 | #elevField = "ELEV" 42 | elevField = "Z" 43 | workspace = r'E:\research\TIN-based\20150811' 44 | #### END #### 45 | 46 | #### DEFAULT OUTPUT #### 47 | preprocessing_pts = workspace + os.sep + 'flat_triangle_new_point.shp' 48 | tin_origin_Shp = workspace + os.sep + 'flat_triangle_tin_origin.shp' 49 | preprocessing_tin = workspace + os.sep + 'flat_triangle_tin_preprocessed.shp' 50 | steepestpath_Shp = workspace + os.sep + 'test_steepestpath.shp' 51 | #### END #### 52 | 53 | #### GLOBAL VARIABLES #### 54 | VertexList = [] ## VertexList stores 3D coordinates (x,y,z) of all the input points 55 | TriangleVertexList = [] ## TriangleList stores all the triangles, each element stores index of vertexes 56 | TriangleNbrIdxList = [] ## TriangleNbrIdx stores index of triangle's neighbors, if there is not neighbor, set it None 57 | VertexTriangleList = [] ## VertexTriangleList stores every vertex's adjacent triangles in counterclockwise 58 | #### END #### 59 | 60 | #### TEMP VARIABLES #### 61 | pts2DList = [] ## temp list to store 2D coordinates of points 62 | 63 | #### END #### 64 | 65 | #### MAIN FUNCTIONS #### 66 | VertexList,pts2DList = ReadPoints(ptsShp,elevField) ## Read input shapefile of points 67 | ## Ready to construct hydrological TIN 68 | ## 1. Create Delaunay Triangulated Irregular Network 69 | ## 2. Remove Flat triangle by insert additional point using an inverse distance weighted interpolation with quadratic nodal functions 70 | ## 3. Remove pit by using a recursive algorithm 71 | ## 4. Handle flat edges by fliping operation 72 | 73 | TriangleVertexList,TriangleNbrIdxList,VertexTriangleList,VertexList = createTIN(VertexList,pts2DList) 74 | #print VertexList[len(VertexList)-1] 75 | WritePointShp(VertexList,elevField,preprocessing_pts) 76 | WritePolyonShp(TriangleVertexList,VertexList,tin_origin_Shp) 77 | del pts2DList 78 | 79 | ### 80 | # flatTriangle = [] ## store vertexes index of flat triangles 81 | # for tri in TriangleVertexList: 82 | # p1 = VertexList[tri[0]] 83 | # p2 = VertexList[tri[1]] 84 | # p3 = VertexList[tri[2]] 85 | # if p1[2] == p2[2] and p2[2] == p3[2]: 86 | # flatTriangle.append(tri) 87 | # #flatTriangle.append(TriangleVertexList.index(tri)) 88 | # print flatTriangle 89 | # for flatT in flatTriangle: 90 | # for flatV in flatT: 91 | # ${0} 92 | # v = [[1,1,4],[1,-1,3],[-1,-1,2]] 93 | # A,B,C,D = tranglePlane(v[0],v[1],v[2]) 94 | # print A,B,C,D 95 | # v0 = [1.,-1.,3.] 96 | # #v0 = [(v[0][0]+v[1][0]+v[2][0])/3.,(v[0][1]+v[1][1]+v[2][1])/3.,(v[0][2]+v[1][2]+v[2][2])/3.] 97 | # if C != 0: ## if C is 0, then the triangle is on the XY plane which need to be pitremoved! 
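tranglePlane above expands the coefficients of the plane A*x + B*y + C*z + D = 0 through three points by cofactors. The same coefficients come out of a cross product, which gives a quick way to sanity-check the expanded formulas; a sketch using the test points from the commented block, numpy only:

import numpy as np

def plane_from_points(p1, p2, p3):
    p1, p2, p3 = (np.asarray(p, dtype=float) for p in (p1, p2, p3))
    normal = np.cross(p2 - p1, p3 - p1)      # (A, B, C), the plane normal
    d = -np.dot(normal, p1)                  # D = -(A*x1 + B*y1 + C*z1)
    return normal[0], normal[1], normal[2], d

# Every vertex must satisfy A*x + B*y + C*z + D = 0.
A, B, C, D = plane_from_points([1, 1, 4], [1, -1, 3], [-1, -1, 2])
for p in ([1, 1, 4], [1, -1, 3], [-1, -1, 2]):
    assert abs(A * p[0] + B * p[1] + C * p[2] + D) < 1e-9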
98 | # intersect = findIntersectIdx(v,v0,A,B,C) 99 | # print intersect 100 | 101 | 102 | 103 | 104 | ###beifen### 105 | # fitPtsIdx = [] ## [[[,,]...],[[,,]...],[[,,]...]] 106 | # fitPtsIdx.append([tempPtsIdx[0]]) 107 | # fitPtsIdx.append([tempPtsIdx[1]]) 108 | # fitPtsIdx.append([tempPtsIdx[2]]) 109 | # for i in range(3): 110 | # tempVertex = f.vertex(i) 111 | # cir_faces = dt.incident_faces(tempVertex) 112 | # finites_faces = [] 113 | # f1 = cir_faces.next() 114 | # if dt.is_infinite(f1) == False: 115 | # finites_faces.append(f1) 116 | # for f2 in cir_faces: 117 | # if f2 == f1: 118 | # break 119 | # else: 120 | # if dt.is_infinite(f2) == False: 121 | # finites_faces.append(f2) 122 | # for f2 in finites_faces: 123 | # for j in range(3): 124 | # fitPtsIdx[i].append(pts2DList.index([f2.vertex(j).point()[0],f2.vertex(j).point()[1]])) 125 | # fitPtsIdxUnique = [] 126 | # for temp in fitPtsIdx: 127 | # fitPtsIdxUnique.append(list(set(temp))) 128 | # print fitPtsIdxUnique 129 | # fitPtsCoor = [] 130 | # for ptsIdx in fitPtsIdxUnique: 131 | # tempPtsCoor = [] 132 | # for inividualIdx in ptsIdx: 133 | # tempPtsCoor.append(pts[inividualIdx]) 134 | # fitPtsCoor.append(tempPtsCoor) 135 | # print fitPtsCoor 136 | -------------------------------------------------------------------------------- /TIN_Hydro/data/test.dbf: -------------------------------------------------------------------------------- 1 | s 2 | <A WZN  3265.000000 3255.000000 3272.100000 3262.730000 3245.400000 3281.120000 3278.070000 3301.500000 3287.390000 3261.400000 3334.020000 3322.520000 3322.520000 3301.570000 3361.620000 3354.710000 3355.580000 3364.110000 3379.350000 3441.030000 3417.420000 3444.860000 3254.580000 3235.180000 3370.000000 3320.420000 3356.130000 3232.790000 3409.030000 3410.680000 3314.360000 3315.110000 3345.720000 3380.420000 3370.440000 3370.380000 3361.000000 3350.630000 3336.680000 3324.230000 3333.460000 3298.850000 3317.230000 3309.670000 3306.220000 3279.390000 3288.260000 3253.800000 3287.520000 3288.950000 3252.830000 3268.870000 3390.460000 3416.310000 3286.230000 3392.100000 3220.830000 3235.950000 3208.570000 3218.450000 -------------------------------------------------------------------------------- /TIN_Hydro/data/test.sbn: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/data/test.sbn -------------------------------------------------------------------------------- /TIN_Hydro/data/test.sbx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/data/test.sbx -------------------------------------------------------------------------------- /TIN_Hydro/data/test.shp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/data/test.shp -------------------------------------------------------------------------------- /TIN_Hydro/data/test.shx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/data/test.shx -------------------------------------------------------------------------------- /TIN_Hydro/env/Config.txt: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/Config.txt -------------------------------------------------------------------------------- /TIN_Hydro/env/x64_python/CGAL-Python-0.9.4b1.win-amd64-py2.7.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x64_python/CGAL-Python-0.9.4b1.win-amd64-py2.7.exe -------------------------------------------------------------------------------- /TIN_Hydro/env/x86_python/CGAL-Python-0.9.4b1.win32-py2.7.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x86_python/CGAL-Python-0.9.4b1.win32-py2.7.exe -------------------------------------------------------------------------------- /TIN_Hydro/env/x86_python/GDAL-1.11.2-cp27-none-win32.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x86_python/GDAL-1.11.2-cp27-none-win32.whl -------------------------------------------------------------------------------- /TIN_Hydro/env/x86_python/alglib-3.10.0.cpython.gpl.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x86_python/alglib-3.10.0.cpython.gpl.zip -------------------------------------------------------------------------------- /TIN_Hydro/env/x86_python/pip-7.1.0.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x86_python/pip-7.1.0.tar.gz -------------------------------------------------------------------------------- /TIN_Hydro/env/x86_python/setuptools-18.2.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x86_python/setuptools-18.2.zip -------------------------------------------------------------------------------- /TIN_Hydro/env/x86_python/vcredist_x86.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/TIN_Hydro/env/x86_python/vcredist_x86.exe -------------------------------------------------------------------------------- /TIN_Hydro/main.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Package : TIN-based Hydrological Analysis 3 | # 4 | # Created By: Liangjun Zhu 5 | # Date From : 5/13/15 6 | # Version : 7 | 8 | # Email : zlj@lreis.ac.cn 9 | # 10 | 11 | import os,sys 12 | from ShapefileIO import * 13 | from HydroTIN import * 14 | 15 | if __name__ == '__main__': 16 | #### INPUT #### 17 | ptsShp = r'E:\test\lyg\preprocess\lyg_elevs.shp' 18 | elevField = "elev" 19 | inBorder = "isBorder" ## if do not want to create concave TIN, set inBorder to be None. 20 | isOutlet = "isOutlet" ## This is aimed to avoid the outlet be filled as a sink. 
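TIN_Hydro/main.py expects a point shapefile with an elevation field plus optional isBorder/isOutlet flag fields; the actual reader is ShapefileIO.ReadPoints, which is not included in this dump. Purely as an illustration of what reading such a file with OGR involves (the field handling here is assumed and may differ from the real ReadPoints):

from osgeo import ogr

def read_points(shp, elev_field, border_field=None, outlet_field=None):
    ds = ogr.Open(shp)
    lyr = ds.GetLayer(0)
    vertices, pts2d, border_idx, outlet_idx = [], [], [], -1
    for i, feat in enumerate(lyr):
        geom = feat.GetGeometryRef()
        x, y = geom.GetX(), geom.GetY()
        z = float(feat.GetField(elev_field))
        vertices.append([x, y, z])            # 3D coordinates, like VertexList
        pts2d.append([x, y])                  # 2D coordinates, like pts2DList
        if border_field is not None and feat.GetField(border_field):
            border_idx.append(i)              # indices of boundary points
        if outlet_field is not None and feat.GetField(outlet_field):
            outlet_idx = i                    # index of the outlet point
    ds = None
    return vertices, pts2d, border_idx, outlet_idx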
21 | workspace = r'E:\test\lyg\20160218' 22 | #### OPTIONAL #### 23 | HANDLE_FLAT_TRIANGLE = True 24 | HANDLE_PIT = True 25 | HANDLE_FLAT_EDGE = True 26 | HANDLE_FALSE_DAM = True 27 | idwShepardParams = [15,25] ## refers to http://www.alglib.net/translator/man/manual.cpython.html#sub_idwbuildmodifiedshepard 28 | multiplier = 3 ## used for false dam detection, refers to detectFalseDam funtion in TINcreator.py 29 | angleThreshold = 0 ## used in FindChannelNodes 30 | #### END #### 31 | 32 | #### DEFAULT OUTPUT #### 33 | preprocessing_pts = workspace + os.sep + 'points_for_channel.shp' 34 | tin_origin_Shp = workspace + os.sep + 'tin_for_channel.shp' 35 | steepestDownPath_Shp = workspace + os.sep + 'steepest_descent_path.shp' 36 | channelpath_Shp = workspace + os.sep + 'channel_path.shp' 37 | steepestUpPath_Shp = workspace + os.sep + 'steepest_ascent_path.shp' 38 | added_node_Shp = workspace + os.sep + 'added_node_for_subbasin.shp' 39 | pts_update_Shp = workspace + os.sep + 'points_for_watershed.shp' 40 | tin_update_Shp = workspace + os.sep + 'tin_for_watershed.shp' 41 | newSteepestDownPath_Shp = workspace + os.sep + 'new_steepest_descent_path.shp' 42 | subbasin_Shp = workspace + os.sep + 'subbasin.shp' 43 | #### END #### 44 | 45 | #### GLOBAL VARIABLES #### 46 | VertexList = [] ## VertexList stores 3D coordinates (x,y,z) of all the input points 47 | TriangleVertexList = [] ## TriangleList stores all the triangles, each element stores index of vertexes 48 | TriangleNbrIdxList = [] ## TriangleNbrIdx stores index of triangle's neighbors, if there is not neighbor, set it None 49 | VertexTriangleList = [] ## VertexTriangleList stores every vertex's adjacent triangles in counterclockwise 50 | #### END #### 51 | 52 | #### TEMP VARIABLES #### 53 | pts2DList = [] ## temp list to store 2D coordinates of points 54 | ptsInBorderIdx = [] ## index of points that line in the boundary 55 | SWITCH = [HANDLE_FLAT_TRIANGLE,HANDLE_PIT,HANDLE_FLAT_EDGE,HANDLE_FALSE_DAM] 56 | #### END #### 57 | 58 | #### INTERMEDIATE #### 59 | SteepestDescentPathList = [] ## Each steepest path is consist with serveral vertexes from the centriod of triangle 60 | SteepestDescentPathVertexIdx = []## Corresponding to SteepestPathList, it stores the index, if the vertex is not node, assigned -1 61 | ChannelNodesDict = {} ## Dictionary of channel nodes, include downstream and upstreams 62 | #### END #### 63 | 64 | #### MAIN PROCEDURES #### 65 | ## 1. Read input shapefile of points 66 | VertexList,pts2DList, ptsInBorderIdx, outletIdx = ReadPoints(ptsShp, elevField, inBorder, isOutlet) 67 | #print ptsInBorderIdx,len(ptsInBorderIdx) 68 | 69 | ## 2. Construct hydrological TIN 70 | dtObject = createTIN(VertexList,pts2DList) 71 | dtObject, pts2DList, VertexList = preprocessTIN(dtObject, VertexList, pts2DList, idwShepardParams,multiplier,SWITCH, ptsInBorderIdx, outletIdx) 72 | TriangleVertexList,TriangleNbrIdxList,VertexTriangleList = TINstruct(dtObject, pts2DList, ptsInBorderIdx) 73 | #print len(TriangleNbrIdxList), TriangleNbrIdxList 74 | #print len(VertexTriangleList), VertexTriangleList 75 | WritePointShp(VertexList,elevField,preprocessing_pts) 76 | WritePolyonShp(TriangleVertexList,VertexList,tin_origin_Shp) 77 | 78 | ## 3. Trace steepest decent paths 79 | SteepestDescentPathList, SteepestDescentPathVertexIdx, breakLinePts = SteepestDescentPath(TriangleVertexList, VertexList, VertexTriangleList) 80 | WriteLineShp(SteepestDescentPathList, steepestDownPath_Shp) 81 | 82 | ## 4. 
Channel nodes and channel flow lines 83 | ChannelNodesDict, channelList, delimitPts, channelNodes = FindChannelNodes(TriangleVertexList, VertexTriangleList, VertexList, angleThreshold, outletIdx) 84 | #print delimitPts ## index of delimiting nodes with upstream and downstream nodes 85 | #print channelNodes 86 | #print ChannelNodesDict 87 | #print channelCoors 88 | #print channelFields 89 | channelCoors, channelFields = WriteChannelShp(channelList, channelpath_Shp) 90 | 91 | ## 5. Delineation of source area 92 | SteepestAscentPathList,SteepestAscentPathVertexIdx, breakLinePts, breakPtsInBorderIdx = SteepestAscentPath(delimitPts, channelNodes, TriangleVertexList, VertexList, VertexTriangleList, ptsInBorderIdx) 93 | # print len(breakLinePts) 94 | # print len(breakPtsInBorderIdx) 95 | WritePointShp(breakLinePts,elevField,added_node_Shp) 96 | WriteLineShp(SteepestAscentPathList, steepestUpPath_Shp) 97 | dtUpdate, newPts2DList, newVertexList, newPtsInBorderIdx = SubdivisionTIN(dtObject, breakLinePts, pts2DList, VertexList, ptsInBorderIdx, breakPtsInBorderIdx) 98 | dtUpdate, newPts2DList, newVertexList = preprocessTIN(dtUpdate, newVertexList, newPts2DList, idwShepardParams,multiplier,[False,False,False,False], newPtsInBorderIdx, outletIdx) 99 | newTriangleVertexList,newTriangleNbrIdxList,newVertexTriangleList = TINstruct(dtUpdate,newPts2DList, newPtsInBorderIdx) 100 | WritePointShp(newVertexList, elevField, pts_update_Shp) 101 | WritePolyonShp(newTriangleVertexList,newVertexList,tin_update_Shp) 102 | 103 | ## 6. Grouping triangles to subbasins 104 | subbasinInfo, newSteepestDescentPathList = GroupTriangles(channelCoors, channelFields, newTriangleVertexList, newVertexList, newVertexTriangleList) 105 | WriteLineShp(newSteepestDescentPathList, newSteepestDownPath_Shp) 106 | WriteSubbasin(subbasinInfo, newVertexList, subbasin_Shp) 107 | #### END #### -------------------------------------------------------------------------------- /Util/GeoTIFF_Converter.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | ## GeoTIFF Converter 4 | ## Convert other grid format to GeoTIFF 5 | ## Any Grid format supported by GDAL is permitted 6 | ## Coded by Liangjun Zhu, 2015-11-29 7 | 8 | from osgeo import gdal 9 | from osgeo import osr 10 | from osgeo import gdalconst 11 | from gdalconst import * 12 | import numpy 13 | 14 | class Raster: 15 | def __init__(self, nRows, nCols, data, noDataValue=None, geotransform=None, srs=None): 16 | self.nRows = nRows 17 | self.nCols = nCols 18 | self.data = data 19 | self.noDataValue = noDataValue 20 | self.geotrans = geotransform 21 | self.srs = srs 22 | self.dx = geotransform[1] 23 | self.xMin = geotransform[0] 24 | self.xMax = geotransform[0] + nCols*geotransform[1] 25 | self.yMax = geotransform[3] 26 | self.yMin = geotransform[3] + nRows*geotransform[5] 27 | 28 | def ReadRaster(rasterFile): 29 | ds = gdal.Open(rasterFile) 30 | band = ds.GetRasterBand(1) 31 | data = band.ReadAsArray() 32 | xsize = band.XSize 33 | ysize = band.YSize 34 | 35 | noDataValue = band.GetNoDataValue() 36 | geotrans = ds.GetGeoTransform() 37 | 38 | srs = osr.SpatialReference() 39 | srs.ImportFromWkt(ds.GetProjection()) 40 | #print srs.ExportToProj4() 41 | if noDataValue is None: 42 | noDataValue = -9999 43 | band = None 44 | ds = None 45 | return Raster(ysize, xsize, data, noDataValue, geotrans, srs) 46 | 47 | def WriteGTiffFile(filename, nRows, nCols, data, geotransform, srs, noDataValue, gdalType): 48 | format = "GTiff" 49 | driver = 
gdal.GetDriverByName(format) 50 | ds = driver.Create(filename, nCols, nRows, 1, gdalType) 51 | ds.SetGeoTransform(geotransform) 52 | ds.SetProjection(srs.ExportToWkt()) 53 | ds.GetRasterBand(1).SetNoDataValue(noDataValue) 54 | ds.GetRasterBand(1).WriteArray(data) 55 | ds = None 56 | def WriteAscFile(filename, data, xsize, ysize, geotransform, noDataValue): 57 | header = """NCOLS %d 58 | NROWS %d 59 | XLLCENTER %f 60 | YLLCENTER %f 61 | CELLSIZE %f 62 | NODATA_VALUE %f 63 | """ % (xsize, ysize, geotransform[0] + 0.5*geotransform[1], geotransform[3]-(ysize-0.5)*geotransform[1], geotransform[1], noDataValue) 64 | 65 | f = open(filename, 'w') 66 | f.write(header) 67 | for i in range(0, ysize): 68 | for j in range(0, xsize): 69 | f.write(str(data[i][j]) + "\t") 70 | f.write("\n") 71 | f.close() 72 | 73 | def Raster2GeoTIFF(tif,geotif, gdalType=gdal.GDT_Float32): 74 | print "Convering TIFF format to GeoTIFF..." 75 | rstFile = ReadRaster(tif) 76 | WriteGTiffFile(geotif, rstFile.nRows, rstFile.nCols, rstFile.data, rstFile.geotrans, rstFile.srs, rstFile.noDataValue, gdalType) 77 | print "Mission done!" 78 | def GRID2ASC(tif,asc): 79 | print "Convering Raster format to ASC file..." 80 | rstFile = ReadRaster(tif) 81 | WriteAscFile(asc, rstFile.data, rstFile.nCols, rstFile.nRows, rstFile.geotrans, rstFile.noDataValue) 82 | print "Mission done!" 83 | 84 | 85 | if __name__ == '__main__': 86 | rawDEM = r'C:\Users\ZhuLJ\Desktop\tmp.asc' 87 | GeoTiff = r'C:\Users\ZhuLJ\Desktop\tmp.tif' 88 | #Raster2GeoTIFF(rawDEM, GeoTiff) 89 | ASC = r'C:\Users\ZhuLJ\Desktop\tmp1.asc' 90 | GRID2ASC(rawDEM, ASC) 91 | -------------------------------------------------------------------------------- /Util/HardenSlpPos_Compare.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | from Util import * 4 | 5 | def Comparison(baseF, compF, valueF, equalF, gdalType=gdal.GDT_Int16): 6 | baseR = ReadRaster(baseF) 7 | compR = ReadRaster(compF) 8 | valueR = ReadRaster(valueF) 9 | baseD = baseR.data 10 | compD = compR.data 11 | valueD = valueR.data 12 | temp = baseD == compD 13 | equalData = numpy.where(temp, baseD, baseR.noDataValue) 14 | ## grid in which compF is coincident with baseF 15 | #WriteGTiffFile(equalF, baseR.nRows, baseR.nCols, equalData, baseR.geotrans, baseR.srs, baseR.noDataValue, gdalType) 16 | countStatDict = {'RDG':[0,0,0,0,0],'SHD':[0,0,0,0,0],'BKS':[0,0,0,0,0],'FTS':[0,0,0,0,0],'VLY':[0,0,0,0,0]} 17 | maxSimiDict = {'RDG':[[],[],[],[],[]],'SHD':[[],[],[],[],[]],'BKS':[[],[],[],[],[]],'FTS':[[],[],[],[],[]],'VLY':[[],[],[],[],[]]} 18 | meanMaxSimiDict = {'RDG':[0,0,0,0,0],'SHD':[0,0,0,0,0],'BKS':[0,0,0,0,0],'FTS':[0,0,0,0,0],'VLY':[0,0,0,0,0]} 19 | maxMaxSimiDict = {'RDG':[0,0,0,0,0],'SHD':[0,0,0,0,0],'BKS':[0,0,0,0,0],'FTS':[0,0,0,0,0],'VLY':[0,0,0,0,0]} 20 | minMaxSimiDict = {'RDG':[0,0,0,0,0],'SHD':[0,0,0,0,0],'BKS':[0,0,0,0,0],'FTS':[0,0,0,0,0],'VLY':[0,0,0,0,0]} 21 | stdMaxSimiDict = {'RDG':[0,0,0,0,0],'SHD':[0,0,0,0,0],'BKS':[0,0,0,0,0],'FTS':[0,0,0,0,0],'VLY':[0,0,0,0,0]} 22 | posList = ['RDG', 'SHD', 'BKS', 'FTS', 'VLY'] 23 | idxList = [1, 2, 4, 8, 16] 24 | # print baseR.noDataValue, compR.noDataValue 25 | for row in range(0, baseR.nRows): 26 | for col in range(0, baseR.nCols): 27 | if baseD[row][col] != baseR.noDataValue and compD[row][col] != compR.noDataValue: 28 | basePos = posList[idxList.index(baseD[row][col])] 29 | compPosIdx = idxList.index(compD[row][col]) 30 | countStatDict.get(basePos)[compPosIdx] += 1; 31 | 
maxSimiDict.get(basePos)[compPosIdx].append(valueD[row][col]) 32 | ## calculate statistics for maxSimiDict 33 | for pos in posList: 34 | for idx in range(0, len(posList)): 35 | if len(maxSimiDict.get(pos)[idx]) > 0: 36 | meanMaxSimiDict.get(pos)[idx] = numpy.mean(maxSimiDict.get(pos)[idx]) 37 | maxMaxSimiDict.get(pos)[idx] = numpy.max(maxSimiDict.get(pos)[idx]) 38 | minMaxSimiDict.get(pos)[idx] = numpy.min(maxSimiDict.get(pos)[idx]) 39 | stdMaxSimiDict.get(pos)[idx] = numpy.std(maxSimiDict.get(pos)[idx]) 40 | 41 | print countStatDict 42 | print meanMaxSimiDict 43 | print maxMaxSimiDict 44 | print minMaxSimiDict 45 | print stdMaxSimiDict 46 | def Comparison2(baseF, compF, valueF, equalF, gdalType=gdal.GDT_Int16): 47 | baseR = ReadRaster(baseF) 48 | compR = ReadRaster(compF) 49 | baseD = baseR.data 50 | compD = compR.data 51 | temp = baseD == compD 52 | equalData = numpy.where(temp, baseD, baseR.noDataValue) 53 | ## grid in which compF is coincident with baseF 54 | WriteGTiffFile(equalF, baseR.nRows, baseR.nCols, equalData, baseR.geotrans, baseR.srs, baseR.noDataValue, gdalType) 55 | rngNum = len(valueF) 56 | countList = [] 57 | idxList = [] 58 | countStatDict = {} 59 | for i in range(rngNum): 60 | countList.append(0) 61 | idxList.append(i) 62 | for i in range(rngNum): 63 | countStatDict[i] = countList[:] 64 | 65 | for row in range(baseR.nRows): 66 | for col in range(baseR.nCols): 67 | if baseD[row][col] != baseR.noDataValue and compD[row][col] != compR.noDataValue: 68 | baseV = baseD[row][col] 69 | compV = compD[row][col] 70 | countStatDict.get(int(baseV))[int(compV)] += 1 71 | print countStatDict 72 | 73 | def DiffMaxSimi(baseF, compF, valueF, diffF, gdalType=gdal.GDT_Float32): 74 | baseR = ReadRaster(baseF) 75 | compR = ReadRaster(compF) 76 | valueR = ReadRaster(valueF) 77 | baseD = baseR.data 78 | compD = compR.data 79 | valueD = valueR.data 80 | temp = baseD != compD 81 | diffData = numpy.where(temp, valueD, valueR.noDataValue) 82 | WriteGTiffFile(diffF, baseR.nRows, baseR.nCols, diffData, baseR.geotrans, baseR.srs, valueR.noDataValue, gdalType) 83 | def reClassify(baseF, destF,subValues, gdalType=gdal.GDT_Float32): 84 | baseR = ReadRaster(baseF) 85 | baseD = baseR.data 86 | rows = baseR.nRows 87 | cols = baseR.nCols 88 | destD = baseD 89 | for row in range(rows): 90 | for col in range(cols): 91 | baseV = baseD[row][col] 92 | if baseV != baseR.noDataValue: 93 | for rng in subValues: 94 | if rng[1] == 1: 95 | if baseV >= rng[0] and baseV <= rng[1]: 96 | destD[row][col] = subValues.index(rng) 97 | else: 98 | if baseV >= rng[0] and baseV < rng[1]: 99 | destD[row][col] = subValues.index(rng) 100 | WriteGTiffFile(destF, baseR.nRows, baseR.nCols, destD, baseR.geotrans, baseR.srs, baseR.noDataValue, gdalType) 101 | 102 | 103 | if __name__ == '__main__': 104 | # baseRaster = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareData\harden_qin2009.tif' 105 | # compRaster = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareData\harden_proposed.tif' 106 | # equalRaster = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareData\commonSlpPos.tif' 107 | # 108 | # baseMaxSimi = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareData\maxS_qin2009.tif' 109 | # compMaxSimi = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareData\maxS_proposed.tif' 110 | # diffMaxSimi = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareData\qin_based_maxS_diff.tif' 111 | 112 | #Comparison(baseRaster, compRaster, baseMaxSimi, equalRaster) 113 | #Comparison(compRaster, baseRaster, compMaxSimi, equalRaster) 114 | #DiffMaxSimi(baseRaster, compRaster, 
baseMaxSimi, diffMaxSimi) 115 | 116 | FileName = ['RdgInf.tif','ShdInf.tif','BksInf.tif','FtsInf.tif','VlyInf.tif'] 117 | for filename in FileName: 118 | compF = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareWithQin2009\basedOriginRPI\FuzzySlpPos\%s' % filename 119 | baseF = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareWithQin2009\Qin_2009_version2\FuzzySlpPos\%s' % filename 120 | workspace = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareWithQin2009\comparison\qin_as_base' 121 | subSection = [[0.8,1],[0.6,0.8],[0.4,0.6],[0.2,0.4],[0,0.2]] 122 | ## 0 1 2 3 4 123 | baseDestF = workspace + '\\base_' + filename 124 | reClassify(baseF, baseDestF, subSection) 125 | compDestF = workspace + '\\comp_' + filename 126 | reClassify(compF, compDestF, subSection) 127 | equalF = workspace + '\\equal_' + filename 128 | Comparison2(baseDestF, compDestF, subSection, equalF) 129 | 130 | -------------------------------------------------------------------------------- /Util/Similarity_Compare.py: -------------------------------------------------------------------------------- 1 | from Util import * 2 | def valuesByCell(baseF, compF): 3 | baseR = ReadRaster(baseF) 4 | compR = ReadRaster(compF) 5 | baseD = baseR.data 6 | compD = compR.data 7 | rows = baseR.nRows 8 | cols = baseR.nCols 9 | baseL = [] 10 | compL = [] 11 | for row in range(rows): 12 | for col in range(cols): 13 | baseV = baseD[row][col] 14 | compV = compD[row][col] 15 | if baseV != baseR.noDataValue or compV != compR.noDataValue: 16 | if baseV > ZERO and compV > ZERO: 17 | baseL.append(baseV) 18 | compL.append(compV) 19 | return (baseL, compL) 20 | 21 | if __name__ == '__main__': 22 | FileName = 'VlyInf.tif' 23 | baseF = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareWithQin2009\basedOriginRPI\withoutElev\FuzzySlpPos\%s' % FileName 24 | compF = r'E:\data_m\AutoFuzSlpPos\C&G_zhu_2016\CompareWithQin2009\Qin_2009_version2\FuzzySlpPos\%s' % FileName 25 | baseL, compL = valuesByCell(baseF, compF) 26 | print baseL 27 | print compL 28 | -------------------------------------------------------------------------------- /Util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crazyzlj/Python/e47fd374c04a945a4201ec1f12502ed28fda5782/Util/__init__.py -------------------------------------------------------------------------------- /Util/available_font_matplotlib.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # https://www.zhihu.com/question/25404709 3 | from matplotlib.font_manager import FontManager 4 | import subprocess 5 | 6 | fm = FontManager() 7 | mat_fonts = set(f.name for f in fm.ttflist) 8 | #print(mat_fonts) 9 | output = subprocess.check_output('fc-list :lang=zh -f "%{family}\n"', shell=True) 10 | #print( '*' * 10, '系统可用的中文字体', '*' * 10) 11 | #print(output) 12 | zh_fonts = set(f.split(',', 1)[0] for f in output.decode('utf-8').split('\n')) 13 | available = mat_fonts & zh_fonts 14 | print('*' * 10, '可用的字体', '*' * 10) 15 | for f in available: 16 | print(f) 17 | -------------------------------------------------------------------------------- /Util/normalize_for_SoLIM_20141110.py: -------------------------------------------------------------------------------- 1 | ## Author: Liangjun Zhu 2 | ## Created on: 2014-11-9 3 | ## Version: 1.0 4 | ## Description: Normalize the input raster dataset to 0~100 or -50~50. 5 | ## Specifically, zero should be zero after normalizing. 
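In the HardenSlpPos_Compare workflow above, each fuzzy-membership raster is first binned into five classes with reClassify and the two classifications are then cross-tabulated cell by cell in Comparison2. The same contingency table can be built without the nested loops; a vectorised sketch with numpy (not part of the scripts, and it assumes both rasters are already coded 0..n_classes-1):

import numpy as np

def cross_tab(base, comp, n_classes, nodata=-9999):
    # Count how many cells of base class r fall into comp class c.
    valid = (base != nodata) & (comp != nodata)
    pairs = base[valid].astype(int) * n_classes + comp[valid].astype(int)
    counts = np.bincount(pairs, minlength=n_classes * n_classes)
    return counts.reshape(n_classes, n_classes)   # rows: base class, cols: comp class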
6 | import arcpy 7 | from arcpy import env 8 | env.workspace = r'E:\Anhui\PurposeSampling\DataPrepare' 9 | arcpy.gp.overwriteOutput = 1 10 | arcpy.CheckOutExtension("spatial") 11 | inFile = "profc" 12 | outFile = "profc_norm.asc" 13 | temp = "temp" 14 | Max = arcpy.GetRasterProperties_management(inFile,"MAXIMUM") 15 | Min = arcpy.GetRasterProperties_management(inFile,"MINIMUM") 16 | maxValue = float(Max.getOutput(0)) 17 | minValue = float(Min.getOutput(0)) 18 | value_range = maxValue - minValue 19 | print maxValue,minValue,value_range 20 | #Exec1 = "100.*(\"%s\" - %.10f)/%.10f" % (inFile, minValue, value_range) 21 | Exec2 = "Con(\"%s\">0,\"%s\"*50./%.11f,Con(\"%s\"<0,(\"%s\"-(%.11f))*50./(%.11f),0))" % (inFile, inFile,maxValue, inFile, inFile, minValue, minValue) 22 | #print Exec1 23 | print Exec2 24 | #arcpy.gp.RasterCalculator_sa(Exec1, temp) 25 | arcpy.gp.RasterCalculator_sa(Exec2, temp) 26 | arcpy.RasterToASCII_conversion(temp, outFile) 27 | arcpy.Delete_management(temp) 28 | print "Successful!" 29 | -------------------------------------------------------------------------------- /Util/pond_preprocess.py: -------------------------------------------------------------------------------- 1 | from Util import * 2 | from TauDEM import DinfUpDependence 3 | import ogr 4 | import subprocess 5 | from shutil import rmtree 6 | def pond_without_stream(orgF, v1, stream, v2, outF, gdalType=gdal.GDT_Float32): 7 | ''' 8 | Eliminate stream grid which located in pond 9 | :param orgF: POND source grid with unique value 10 | :param v1: value that represents pond 11 | :param stream: STREAM source grid with unique value 12 | :param v2: value that represents stream 13 | :param outF: output grid 14 | :param gdalType: (default parameter) output grid file type 15 | :return: 16 | ''' 17 | orgR = ReadRaster(orgF) 18 | orgD = orgR.data 19 | streamR = ReadRaster(stream) 20 | streamD = streamR.data 21 | srows = streamR.nRows 22 | scols = streamR.nCols 23 | rows = orgR.nRows 24 | cols = orgR.nCols 25 | #print streamR.noDataValue 26 | if srows == rows and scols == cols: 27 | destD = streamD[:][:] 28 | for row in range(rows): 29 | for col in range(cols): 30 | if streamD[row][col] != streamR.noDataValue and streamD[row][col] != v2 and orgD[row][col] == v1: 31 | destD[row][col] = 1 32 | else: 33 | destD[row][col] = streamR.noDataValue 34 | WriteGTiffFile(outF, rows, cols, destD, streamR.geotrans, streamR.srs, streamR.noDataValue, gdalType) 35 | else: 36 | print "raster size unmatched!" 
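normalize_for_SoLIM above describes a zero-preserving rescale (positives to (0, 50], negatives to [-50, 0), zero stays zero), but the negative branch of Exec2, (x - min)*50/min, appears to send values just below zero to about -50 and the minimum to 0, which does not match that description. A numpy sketch of the normalization as stated in the header comment, i.e. under the assumption that the description is the intent:

import numpy as np

def normalize_pm50(data, nodata=-9999):
    # Assumes the valid data spans both signs, as profile curvature does.
    out = np.full(data.shape, float(nodata))
    valid = data != nodata
    vmax = data[valid].max()
    vmin = data[valid].min()
    pos = valid & (data > 0)
    neg = valid & (data < 0)
    out[pos] = data[pos] * 50.0 / vmax        # maximum maps to 50
    out[neg] = data[neg] * 50.0 / abs(vmin)   # minimum maps to -50
    out[valid & (data == 0)] = 0.0            # zero stays zero
    return out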
37 | return 38 | def GDAL_SWAP(inraster, inshape, fieldName): 39 | ds = ogr.Open(inshape) 40 | lyr = ds.GetLayer(0) 41 | lyr.ResetReading() 42 | ft = lyr.GetNextFeature() 43 | while ft: 44 | cur_field_name = ft.GetFieldAsString(fieldName) 45 | outraster = inraster.replace('.tif', '_%s.tif' % cur_field_name.replace(' ', '_')) 46 | subprocess.call(['gdalwarp', inraster, outraster, '-cutline', inshape, 47 | '-crop_to_cutline', '-cwhere', "'%s'='%s'" % (fieldName, cur_field_name)]) 48 | ft = lyr.GetNextFeature() 49 | ds = None 50 | def Calculate_PND_FR(rs, subbasinShp, fieldName, tempDir, outCSV): 51 | ''' 52 | 53 | :param rs: 54 | :param subbasinShp: 55 | :param fieldName: 56 | :param tempDir: 57 | :param outCSV: 58 | :return: 59 | ''' 60 | ### stats = {subbasinID: [pondNum, pondSrcNum, subbasinNum]...} 61 | #GDAL_SWAP(pond, subbasinShp, fieldName) 62 | rmmkdir(tempDir) 63 | stats = {} 64 | ds = ogr.Open(subbasinShp) 65 | lyr = ds.GetLayer(0) 66 | lyr.ResetReading() 67 | ft = lyr.GetNextFeature() 68 | while ft: 69 | cur_field_name = ft.GetFieldAsString(fieldName) 70 | counts = [] 71 | for r in rs: 72 | curFileName = r.split(os.sep)[-1] 73 | outraster = tempDir + os.sep + curFileName.replace('.tif','_%s.tif' % cur_field_name.replace(' ', '_')) 74 | subprocess.call(['gdalwarp', r, outraster, '-cutline', subbasinShp, 75 | '-crop_to_cutline', '-cwhere', "'%s'='%s'" % (fieldName, cur_field_name), '-dstnodata', '-9999' ]) 76 | counts.append(Counting(outraster)) 77 | stats[cur_field_name] = counts 78 | ft = lyr.GetNextFeature() 79 | ds = None 80 | ## delete temporary files 81 | #rmtree(tempDir,True) 82 | #stats = {'11': [39, 1327, 3025], '10': [5, 7, 2916], '13': [0, 0, 3202], '12': [155, 1528, 3181], '15': [68, 635, 12111], '14': [8, 295, 6426], '1': [43, 2218, 0], '3': [51, 2644, 7412], '2': [124, 1462, 8075], '5': [13, 38, 2580], '4': [0, 0, 5569], '7': [53, 171, 3551], '6': [0, 0, 1227], '9': [187, 2199, 3444], '8': [17, 24, 1056]} 83 | f = open(outCSV, 'w') 84 | f.write("subbsnID,pondNum,pondSrcNum,SubbsnNum\n") 85 | for sub in stats.keys(): 86 | nums = stats[sub] 87 | catStr = sub + ',' 88 | for num in nums: 89 | catStr += str(num) + "," 90 | catStr = catStr[0:len(catStr)-1] 91 | catStr += "\n" 92 | f.write(catStr) 93 | f.close() 94 | #print stats 95 | if __name__ == '__main__': 96 | ORG_POND_PATH = r'E:\data\zhongTianShe\model_data_swat\modeling\pond_preprocess' 97 | SWAT_PROJ_PATH = r'E:\data_m\QSWAT_projects\Done\baseSim_unCali\baseSim_unCali' 98 | DEM_NAME = 'dem_zts' 99 | PROC_NUM = 4 100 | MPI_PATH = None 101 | TauDEM_PATH = None 102 | UPAREA_THRESHOLD = 0.9 103 | 104 | POND_SRC = ORG_POND_PATH + os.sep + 'pond.tif' 105 | STREAM_SRC = SWAT_PROJ_PATH + os.sep + 'Source' + os.sep + DEM_NAME + 'src.tif' 106 | CUR_POND_PROCESS_PATH = SWAT_PROJ_PATH + os.sep + 'pond_preprocess' 107 | mkdir(CUR_POND_PROCESS_PATH) 108 | POND_WITHOUT_STREAM = CUR_POND_PROCESS_PATH + os.sep + 'pond_no_stream.tif' 109 | DINF_DIR = SWAT_PROJ_PATH + os.sep + 'Source' + os.sep + DEM_NAME + 'ang.tif' 110 | POND_UPAREA = CUR_POND_PROCESS_PATH + os.sep + 'pond_uparea.tif' 111 | POND_UPAREA_BINARY = CUR_POND_PROCESS_PATH + os.sep + 'pond_uparea_binary.tif' 112 | SUBBASIN = SWAT_PROJ_PATH + os.sep + 'Source' + os.sep + DEM_NAME + 'w.tif' 113 | SUBBASIN_SHP = SWAT_PROJ_PATH + os.sep + 'Source' + os.sep + DEM_NAME + 'wshed.shp' 114 | TEMPDIR = CUR_POND_PROCESS_PATH + os.sep + 'temp' 115 | PND_FR_CSV = CUR_POND_PROCESS_PATH + os.sep + 'pond_original.csv' 116 | ### begin to preprocessing pond related data 117 | ### 1. 
Eliminate stream grid which located in pond 118 | pond_without_stream(POND_SRC, 18, STREAM_SRC, 1, POND_WITHOUT_STREAM) 119 | ### 2. Invoke TauDEM function DinfUpDependence to calculate upstream sources 120 | DinfUpDependence(DINF_DIR, POND_WITHOUT_STREAM, POND_UPAREA, PROC_NUM, MPI_PATH, TauDEM_PATH) 121 | ### 3. Reclassify the POND_UPAREA according to a threshold, 0.9 by default 122 | Binarization(POND_UPAREA, POND_UPAREA_BINARY, UPAREA_THRESHOLD) 123 | ### 4. Calculate pond area, pond contributing area, and subbasin area 124 | Calculate_PND_FR([POND_WITHOUT_STREAM, POND_UPAREA_BINARY, SUBBASIN], SUBBASIN_SHP, 'subbasin', TEMPDIR, PND_FR_CSV) 125 | 126 | 127 | 128 | 129 | -------------------------------------------------------------------------------- /Util/rasterCalculator.py: -------------------------------------------------------------------------------- 1 | from Util import * 2 | def changeValue(orgF, newF, oldValue, newValue, gdalType=gdal.GDT_Float32): 3 | orgR = ReadRaster(orgF) 4 | orgData = orgR.data 5 | cols = orgR.nCols 6 | rows = orgR.nRows 7 | #print orgR.noDataValue 8 | for col in range(cols): 9 | for row in range(rows): 10 | curValue = orgData[row][col] 11 | if curValue in oldValue: 12 | idx = oldValue.index(curValue) 13 | orgData[row][col] = newValue[idx] 14 | if curValue == orgR.noDataValue: 15 | orgData[row][col] = -9999 16 | WriteGTiffFile(newF, orgR.nRows, orgR.nCols, orgData, orgR.geotrans, orgR.srs, -9999, gdalType) 17 | def mode(vlist): 18 | if len(vlist) == 0: 19 | return None 20 | else: 21 | count_dict = {} 22 | for i in vlist: 23 | if count_dict.has_key(i): 24 | count_dict[i] += 1 25 | else: 26 | count_dict[i] = 1 27 | max_appear = 0 28 | for v in count_dict.values(): 29 | if v > max_appear: 30 | max_appear = v 31 | if max_appear == 1: 32 | return None 33 | mode_list = [] 34 | for k,v in count_dict.items(): 35 | if v == max_appear: 36 | mode_list.append(k) 37 | return mode_list 38 | def eliminateNoData(orgF, eliValue, newF, gdalType = gdal.GDT_Float32): 39 | orgR = ReadRaster(orgF) 40 | orgData = orgR.data 41 | cols = orgR.nCols 42 | rows = orgR.nRows 43 | #print orgR.noDataValue 44 | count = 9999 45 | iter = 0 46 | while count > 0: 47 | count = 0 48 | for col in range(cols): 49 | for row in range(rows): 50 | if orgData[row][col] == eliValue: 51 | count += 1 52 | nbrValues = [] 53 | for dr in drow: 54 | for dc in dcol: 55 | if row+dr < rows and col+dc < cols and orgData[row+dr][col+dc] != orgR.noDataValue and orgData[row+dr][col+dc] != eliValue: 56 | nbrValues.append(orgData[row+dr][col+dc]) 57 | modeValues = mode(nbrValues) 58 | if len(nbrValues) == 1 or modeValues is None: 59 | orgData[row][col] = nbrValues[0] 60 | else: 61 | orgData[row][col] = modeValues[0] 62 | iter += 1 63 | print "iterator number is: %d, total %d has been eliminated" % (iter, count) 64 | WriteGTiffFile(newF, orgR.nRows, orgR.nCols, orgData, orgR.geotrans, orgR.srs, -9999, gdalType) 65 | 66 | 67 | 68 | if __name__ == '__main__': 69 | rawRaster = r'E:\data\zhongTianShe\soil_preprocess\soil_liyang_clip.tif' 70 | calRaster = r'E:\data\zhongTianShe\soil_preprocess\soil_liyang_clip_cal.tif' 71 | #langxi: [3,4,6,9,15]->[9,10,6,7,53] 72 | #guangde: [4,5,7,10,13,20,22]->[8,9,10,6,7,53,50] 73 | #liyang: [3,4,5,6,7,15,18,19,32]->[4,22,19,1,3,23,5,2,7] 74 | #orgValue = [] 75 | #newValue = [] 76 | #changeValue(rawRaster, calRaster, orgValue, newValue,gdal.GDT_Int16) 77 | mosaicF = r'E:\data\zhongTianShe\soil_preprocess\mosaic_mask.tif' 78 | mosaicFNew = 
r'E:\data\zhongTianShe\soil_preprocess\mosaic_new.tif' 79 | eliminateNoData(mosaicF, 0, mosaicFNew, gdal.GDT_Int16) -------------------------------------------------------------------------------- /Util/test_chinese_matplotlib.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import matplotlib.pyplot as plt 3 | plt.rcParams['font.serif'] = 'SimSun' 4 | plt.plot((1,2,3),(4,3,-1)) 5 | plt.xlabel(u'横坐标 X-axis') 6 | plt.ylabel(u'纵坐标 Y-axis') 7 | plt.show() 8 | -------------------------------------------------------------------------------- /test/DEAP_tutorial.py: -------------------------------------------------------------------------------- 1 | from deap import base, creator, tools 2 | import random 3 | 4 | # create types 5 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,)) 6 | creator.create("Individual", list, fitness=creator.FitnessMin) 7 | # initialization 8 | IND_SIZE = 10 9 | 10 | toolbox = base.Toolbox() 11 | toolbox.register("attribute", random.random) 12 | toolbox.register("individual", tools.initRepeat, creator.Individual, 13 | toolbox.attribute, n=IND_SIZE) 14 | toolbox.register("population", tools.initRepeat, list, toolbox.individual) 15 | # operators 16 | def evaluate(individual): 17 | return sum(individual), 18 | 19 | toolbox.register("mate", tools.cxTwoPoint) 20 | toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1) 21 | toolbox.register("select", tools.selTournament, tournsize=3) 22 | toolbox.register("evaluate", evaluate) 23 | 24 | # algorithms 25 | def main(): 26 | pop = toolbox.population(n=50) 27 | CXPB, MUTPB, NGEN = 0.5, 0.2, 40 28 | # Evaluate the entire population 29 | fitnesses = map(toolbox.evaluate, pop) 30 | for ind, fit in zip(pop, fitnesses): 31 | ind.fitness.values = fit 32 | for g in range(NGEN): 33 | # select the next generation individuals 34 | offspring = toolbox.select(pop, len(pop)) 35 | # clone the selected individuals 36 | offspring = map(toolbox.clone, offspring) 37 | # apply crossover and mutation on the offspring 38 | for child1, child2 in zip(offspring[::2], offspring[1::2]): 39 | if random.random() < CXPB: 40 | toolbox.mate(child1, child2) 41 | del child1.fitness.values 42 | del child2.fitness.values 43 | for mutant in offspring: 44 | if random.random() < MUTPB: 45 | toolbox.mutate(mutant) 46 | del mutant.fitness.values 47 | # evaluate the individuals with an invalid fitness 48 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid] 49 | fitnesses = map(toolbox.evaluate, invalid_ind) 50 | for ind, fit in zip(invalid_ind, fitnesses): 51 | ind.fitness.values = fit 52 | # the population is entirely replaced by the offspring 53 | pop[:] = offspring 54 | return pop 55 | if __name__ == '__main__': 56 | print main() 57 | 58 | 59 | -------------------------------------------------------------------------------- /test/SCOOP_tutorial.py: -------------------------------------------------------------------------------- 1 | # Script to be launched with: python -m scoop scriptName.py 2 | from __future__ import print_function 3 | # allows Python 2 users to have a print function compatible with Python 3 4 | import random 5 | import operator 6 | from scoop import logger 7 | from scoop import futures 8 | from scoop import shared 9 | from math import hypot 10 | import time 11 | data = [random.randint(-1000, 1000) for r in range(1000)] 12 | 13 | def myParallelFunc(inV): 14 | myV = shared.getConst("myValue") 15 | return inV + myV 16 | def helloworld(v): 17 | return "Hello 
SCOOP from Future #{0}".format(v) 18 | 19 | def test(tries): 20 | return sum(hypot(random.random(), random.random()) < 1 for _ in range(tries)) 21 | 22 | def calcPi(nbFutures, tries): 23 | expr = futures.map(test, [tries] * nbFutures) 24 | return 4. * sum(expr) / float(nbFutures * tries) 25 | 26 | def calcPiSerial(nbFutures, tries): 27 | sumexpr = sum(list(map(test, [tries] * nbFutures))) 28 | return 4. * sumexpr / float(nbFutures * tries) 29 | 30 | if __name__ == '__main__': 31 | # # Python's standard serial function 32 | # dataSerial = list(map(abs, data)) 33 | # serialSum = sum(map(abs, data)) 34 | # print "serial: %f" % serialSum 35 | # # SCOOP's parallel function 36 | # dataParallel = list(futures.map(abs, data)) 37 | # parallelSum = futures.mapReduce(abs, operator.add, data) 38 | # assert dataSerial == dataParallel 39 | # print "parallel: %f" % parallelSum 40 | # shared.setConst(myValue = 5) 41 | # print (list(futures.map(myParallelFunc, range(10)))) 42 | # logger.warn("this is a warning test!") 43 | # returnValues = list(futures.map(helloworld, range(6))) 44 | # print("\n".join(returnValues)) 45 | t1 = time.time() 46 | print ("pi = {}".format(calcPi(3000, 5))) 47 | print (time.time() - t1) 48 | # t1 = time.time() 49 | # print("pi = {}".format(calcPiSerial(3000, 5000))) 50 | # print (time.time() - t1) 51 | 52 | -------------------------------------------------------------------------------- /test/TidyZotero.py: -------------------------------------------------------------------------------- 1 | import os 2 | from shutil import rmtree 3 | suffix = ['.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx', '.tex', '.txt'] 4 | deldirs = [] 5 | for root, dirs, files in os.walk(r"D:\mysync\storage"): 6 | for i in files: 7 | for suf in suffix: 8 | if i.find(suf) < 0: 9 | continue 10 | else: 11 | break 12 | else: # Can not find any useful documents. 13 | deldirs.append(root) 14 | deldirs = list(set(deldirs)) 15 | for deldir in deldirs: 16 | print "deleting %s..." % deldir 17 | rmtree(deldir) 18 | -------------------------------------------------------------------------------- /test/asc2tif.py: -------------------------------------------------------------------------------- 1 | from pygeoc.raster import RasterUtilClass 2 | 3 | ascfile = r'C:\z_code\DTA\RasterClass\data\luid.asc' 4 | tiffile = r'C:\z_code\DTA\RasterClass\data\luid.tif' 5 | 6 | RasterUtilClass.raster_to_gtiff(ascfile, tiffile) -------------------------------------------------------------------------------- /test/down_ts.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import urllib2 4 | import os 5 | import sys 6 | import fileinput 7 | import shutil 8 | import time 9 | 10 | from pygeoc.utils import UtilClass, StringClass 11 | 12 | 13 | def downloadByUrl(curUrl, filePath): 14 | if os.path.exists(filePath): 15 | return 16 | try: 17 | f = urllib2.urlopen(curUrl) 18 | data = f.read() 19 | with open(filePath, "wb") as code: 20 | code.write(data) 21 | return True 22 | except Exception: 23 | return False 24 | 25 | 26 | def read_data_items_from_txt(txt_file): 27 | """Read data items include title from text file, each data element are split by TAB or COMMA. 28 | Be aware, the separator for each line can only be TAB or COMMA, and COMMA is the recommended. 
29 | Args: 30 | txt_file: full path of text data file 31 | Returns: 32 | 2D data array 33 | """ 34 | data_items = list() 35 | with open(txt_file, 'r') as f: 36 | for line in f: 37 | str_line = line.strip() 38 | if str_line != '' and str_line.find('#') < 0: 39 | line_list = StringClass.split_string(str_line, ['\t']) 40 | if len(line_list) <= 1: 41 | line_list = StringClass.split_string(str_line, [',']) 42 | data_items.append(line_list) 43 | return data_items 44 | 45 | 46 | def download_m3u8_resolution(ws, furl, name): 47 | dst_url = furl 48 | dst_file = ws + os.sep + 'pre_%s.m3u8' % name 49 | downloadByUrl('%s/output.m3u8' % furl, dst_file) 50 | if not os.path.exists(dst_file): 51 | return None 52 | with open(dst_file, 'r') as f: 53 | lines = f.readlines() 54 | for idx, line in enumerate(lines): 55 | line = line.strip() 56 | if '102400' in line and len(lines) > idx: 57 | dst_url += '/%s' % lines[idx + 1] 58 | rno = dst_url.rfind('/') 59 | return dst_url[:rno] 60 | print("Not found!") 61 | return None 62 | 63 | 64 | def download_actual_m3u8(ws, furl, name): 65 | dst_file = ws + os.sep + '%s.m3u8' % name 66 | downloadByUrl('%s/output.m3u8' % furl, dst_file) 67 | if not os.path.exists(dst_file): 68 | return None 69 | # replace "/mykey.key" with "mykey.key" 70 | for i, line in enumerate(fileinput.input(dst_file, inplace=1)): 71 | if '/mykey.key' in line: 72 | sys.stdout.write(line.replace('/mykey.key', 'mykey.key')) 73 | else: 74 | sys.stdout.write(line) 75 | return dst_file 76 | 77 | 78 | def download_ts_files(ws, dstf, baseurl): 79 | urls = list() 80 | with open(dstf, 'r') as f: 81 | lines = f.readlines() 82 | for line in lines: 83 | line = line.strip() 84 | if '.ts' not in line: 85 | continue 86 | tmpurl = '%s/%s' % (baseurl, line) 87 | urls.append(tmpurl) 88 | try_time = 0 89 | while try_time <= 3: 90 | if not downloadByUrl(tmpurl, ws + os.sep + line): 91 | try_time += 1 92 | time.sleep(0.1) 93 | else: 94 | break 95 | 96 | 97 | if __name__ == '__main__': 98 | # workspace = r'/home/zhulj/test/zhongYeWangXiao' 99 | workspace = r'D:\test\ts' 100 | fname_urls = workspace + os.sep + 'buchong.txt' 101 | key_file = workspace + os.sep + 'mykey.key' 102 | ffmpeg_bin = r'c:\ffmpeg\bin\ffmpeg' 103 | 104 | furls = read_data_items_from_txt(fname_urls) 105 | comb_dir = workspace + os.sep + 'combination' 106 | if not os.path.isdir(comb_dir): 107 | os.mkdir(comb_dir) 108 | for fdir, fname, furl in furls: 109 | print('downloading %s...' 
% fname) 110 | tmp_ws = workspace + os.sep + fdir 111 | if not os.path.isdir(tmp_ws): 112 | os.mkdir(tmp_ws) 113 | tmp_comb_dir = comb_dir + os.sep + fdir 114 | if not os.path.isdir(tmp_comb_dir): 115 | os.mkdir(tmp_comb_dir) 116 | tmp_ts_dir = tmp_ws + os.sep + fname 117 | if not os.path.isdir(tmp_ts_dir): 118 | os.mkdir(tmp_ts_dir) 119 | 120 | dstUrl = download_m3u8_resolution(tmp_ts_dir, furl, fname) 121 | if dstUrl is None: 122 | print("%s failed, can not download pre_output.m3u8\n" % fname) 123 | continue 124 | print(dstUrl) 125 | 126 | dstFile = download_actual_m3u8(tmp_ts_dir, dstUrl, fname) 127 | if dstFile is None: 128 | print("%s failed, can not download actual output.m3u8\n" % fname) 129 | continue 130 | print(dstFile) 131 | 132 | shutil.copy(key_file, tmp_ts_dir + os.sep + 'mykey.key') 133 | download_ts_files(tmp_ts_dir, dstFile, dstUrl) 134 | 135 | UtilClass.run_command([ffmpeg_bin, 136 | '-allowed_extensions', 'ALL', 137 | '-i', '%s/%s.m3u8' % (tmp_ts_dir, fname), 138 | '-bsf:a', 'aac_adtstoasc', 139 | '-vcodec', 'copy', 140 | '-c', 'copy', 141 | '-crf', '50', '%s/%s.mp4' % (tmp_comb_dir, fname)]) 142 | time.sleep(10) 143 | -------------------------------------------------------------------------------- /test/down_ts_linux.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import urllib2 4 | import os 5 | import sys 6 | import fileinput 7 | import shutil 8 | import time 9 | 10 | from pygeoc.utils import UtilClass, StringClass 11 | 12 | 13 | def downloadByUrl(curUrl, filePath): 14 | if os.path.exists(filePath): 15 | return 16 | try: 17 | f = urllib2.urlopen(curUrl) 18 | data = f.read() 19 | with open(filePath, "wb") as code: 20 | code.write(data) 21 | return True 22 | except Exception: 23 | return False 24 | 25 | 26 | def read_data_items_from_txt(txt_file): 27 | """Read data items include title from text file, each data element are split by TAB or COMMA. 28 | Be aware, the separator for each line can only be TAB or COMMA, and COMMA is the recommended. 
29 | Args: 30 | txt_file: full path of text data file 31 | Returns: 32 | 2D data array 33 | """ 34 | data_items = list() 35 | with open(txt_file, 'r') as f: 36 | for line in f: 37 | str_line = line.strip() 38 | if str_line != '' and str_line.find('#') < 0: 39 | line_list = StringClass.split_string(str_line, ['\t']) 40 | if len(line_list) <= 1: 41 | line_list = StringClass.split_string(str_line, [',']) 42 | data_items.append(line_list) 43 | return data_items 44 | 45 | 46 | def download_m3u8_resolution(ws, furl, name): 47 | dst_url = furl 48 | dst_file = ws + os.sep + 'pre_%s.m3u8' % name 49 | downloadByUrl('%s/output.m3u8' % furl, dst_file) 50 | if not os.path.exists(dst_file): 51 | return None 52 | with open(dst_file, 'r') as f: 53 | lines = f.readlines() 54 | for idx, line in enumerate(lines): 55 | line = line.strip() 56 | if '102400' in line and len(lines) > idx: 57 | dst_url += '/%s' % lines[idx + 1] 58 | rno = dst_url.rfind('/') 59 | return dst_url[:rno] 60 | print("Not found!") 61 | return None 62 | 63 | 64 | def download_actual_m3u8(ws, furl, name): 65 | dst_file = ws + os.sep + '%s.m3u8' % name 66 | downloadByUrl('%s/output.m3u8' % furl, dst_file) 67 | if not os.path.exists(dst_file): 68 | return None 69 | # replace "/mykey.key" with "mykey.key" 70 | for i, line in enumerate(fileinput.input(dst_file, inplace=1)): 71 | if '/mykey.key' in line: 72 | sys.stdout.write(line.replace('/mykey.key', 'mykey.key')) 73 | else: 74 | sys.stdout.write(line) 75 | return dst_file 76 | 77 | 78 | def download_ts_files(ws, dstf, baseurl): 79 | urls = list() 80 | with open(dstf, 'r') as f: 81 | lines = f.readlines() 82 | for line in lines: 83 | line = line.strip() 84 | if '.ts' not in line: 85 | continue 86 | tmpurl = '%s/%s' % (baseurl, line) 87 | urls.append(tmpurl) 88 | try_time = 0 89 | while try_time <= 3: 90 | if not downloadByUrl(tmpurl, ws + os.sep + line): 91 | try_time += 1 92 | time.sleep(0.1) 93 | else: 94 | break 95 | 96 | 97 | if __name__ == '__main__': 98 | workspace = r'/home/zhulj/test/zhongYeWangXiao' 99 | # workspace = r'D:\test\ts' 100 | fname_urls = workspace + os.sep + 'filename_urls3.txt' 101 | key_file = workspace + os.sep + 'mykey.key' 102 | ffmpeg_bin = 'ffmpeg' 103 | 104 | furls = read_data_items_from_txt(fname_urls) 105 | comb_dir = workspace + os.sep + 'combination' 106 | if not os.path.isdir(comb_dir): 107 | os.mkdir(comb_dir) 108 | for fdir, fname, furl in furls: 109 | print('downloading %s...' 
% fname) 110 | tmp_ws = workspace + os.sep + fdir 111 | if not os.path.isdir(tmp_ws): 112 | os.mkdir(tmp_ws) 113 | tmp_comb_dir = comb_dir + os.sep + fdir 114 | if not os.path.isdir(tmp_comb_dir): 115 | os.mkdir(tmp_comb_dir) 116 | tmp_ts_dir = tmp_ws + os.sep + fname 117 | if not os.path.isdir(tmp_ts_dir): 118 | os.mkdir(tmp_ts_dir) 119 | 120 | dstUrl = download_m3u8_resolution(tmp_ts_dir, furl, fname) 121 | if dstUrl is None: 122 | print("%s failed, can not download pre_output.m3u8\n" % fname) 123 | continue 124 | print(dstUrl) 125 | 126 | dstFile = download_actual_m3u8(tmp_ts_dir, dstUrl, fname) 127 | if dstFile is None: 128 | print("%s failed, can not download actual output.m3u8\n" % fname) 129 | continue 130 | print(dstFile) 131 | 132 | shutil.copy(key_file, tmp_ts_dir + os.sep + 'mykey.key') 133 | download_ts_files(tmp_ts_dir, dstFile, dstUrl) 134 | 135 | UtilClass.run_command([ffmpeg_bin, 136 | '-allowed_extensions', 'ALL', 137 | '-i', '%s/%s.m3u8' % (tmp_ts_dir, fname), 138 | '-bsf:a', 'aac_adtstoasc', 139 | '-vcodec', 'copy', 140 | '-c', 'copy', 141 | '-crf', '50', '%s/%s.mp4' % (tmp_comb_dir, fname)]) 142 | time.sleep(10) 143 | -------------------------------------------------------------------------------- /test/mongoclient.py: -------------------------------------------------------------------------------- 1 | import pymongo 2 | from pymongo import MongoClient 3 | ip = "127.0.0.1" 4 | port = 27017 5 | conn = MongoClient(ip, port) 6 | # print conn.database_names() 7 | db = conn["model_dianbu2_30m_longterm"] 8 | # print db.collection_names() 9 | gfs = db["SPATIAL.files"] 10 | 11 | -------------------------------------------------------------------------------- /test/numpy_test.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | 4 | 5 | DIR_PAIRS = [(0, 1), 6 | (-1, 1), 7 | (-1, 0), 8 | (-1, -1), 9 | (0, -1), 10 | (1, -1), 11 | (1, 0), 12 | (1, 1)] 13 | 14 | 15 | def recursive_continuous_cells(numpyarray, row, col, idx): 16 | nrows, ncols = numpyarray.shape 17 | for r, c in DIR_PAIRS: 18 | new_row = row + r 19 | new_col = col + c 20 | if 0 <= new_row < nrows and 0 <= new_col < ncols: 21 | if numpyarray[new_row][new_col] == numpyarray[row][col]: 22 | if not [new_row, new_col] in idx: 23 | idx.append([new_row, new_col]) 24 | recursive_continuous_cells(numpyarray, new_row, new_col, idx) 25 | 26 | 27 | def get_continuous_count_cells_recursive(orgarray, orgv = 1): 28 | ''' 29 | Counting the number of continuous cells of a single raster layer 30 | Recursive version 31 | :param orgarray: 32 | :param orgv: 33 | :return: 34 | ''' 35 | newarray = numpy.copy(orgarray) 36 | rows, cols = numpy.shape(newarray) 37 | for i in range(rows): 38 | for j in range(cols): 39 | if newarray[i][j] == orgv: 40 | tempIdx = [[i, j]] 41 | recursive_continuous_cells(newarray, i, j, tempIdx) 42 | count = len(tempIdx) 43 | # print count 44 | for tmpR, tmpC in tempIdx: 45 | newarray[tmpR][tmpC] = count 46 | return newarray 47 | 48 | 49 | # def get_continuous_count_cells_iterative(orgarray, orgv = 1): 50 | # ''' 51 | # Counting the number of continuous cells of a single raster layer 52 | # Iterative version 53 | # :param orgarray: 54 | # :param orgv: 55 | # :return: 56 | # ''' 57 | 58 | 59 | if __name__ == "__main__": 60 | org = [[1, 0, 0, 0, 0, 0], 61 | [0, 1, 0, 0, 1, 1], 62 | [0, 0, 1, 0, 1, 0], 63 | [0, 0, 0, 0, 1, 0], 64 | [0, 0, 0, 1, 0, 0]] 65 | 66 | print (get_continuous_count_cells_recursive(org)) 67 | 
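# --- annotation -----------------------------------------------------------
# A hedged sketch of the iterative variant stubbed out above
# (get_continuous_count_cells_iterative): the same connected-cell counting
# done with an explicit queue (breadth-first search) instead of recursion,
# which avoids hitting Python's recursion limit on large patches. The name
# and behaviour follow the recursive version's intent; treat it as an
# illustration, not part of the original script.
from collections import deque
import numpy

def get_continuous_count_cells_iterative(orgarray, orgv=1):
    """Label each 8-connected patch of cells equal to orgv with its cell count (BFS version)."""
    newarray = numpy.copy(orgarray)
    nrows, ncols = newarray.shape
    neighbors = [(0, 1), (-1, 1), (-1, 0), (-1, -1),
                 (0, -1), (1, -1), (1, 0), (1, 1)]
    visited = numpy.zeros((nrows, ncols), dtype=bool)
    for i in range(nrows):
        for j in range(ncols):
            if newarray[i][j] != orgv or visited[i][j]:
                continue
            # grow one patch from this seed cell
            patch = [(i, j)]
            visited[i][j] = True
            queue = deque(patch)
            while queue:
                r, c = queue.popleft()
                for dr, dc in neighbors:
                    nr, nc = r + dr, c + dc
                    if (0 <= nr < nrows and 0 <= nc < ncols
                            and not visited[nr][nc] and newarray[nr][nc] == orgv):
                        visited[nr][nc] = True
                        patch.append((nr, nc))
                        queue.append((nr, nc))
            # write the patch size back into every cell of the patch
            for r, c in patch:
                newarray[r][c] = len(patch)
    return newarray
# ---------------------------------------------------------------------------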
-------------------------------------------------------------------------------- /test/pyqgis_test.py: -------------------------------------------------------------------------------- 1 | from qgis.core import QgsApplication 2 | from PyQt4.QtGui import QDialog 3 | 4 | GUIEnabled=True 5 | app = QgsApplication([], GUIEnabled) 6 | 7 | dlg = QDialog() 8 | dlg.exec_() 9 | 10 | app.exit(app.exec_()) -------------------------------------------------------------------------------- /test/uniqueID_scoop.py: -------------------------------------------------------------------------------- 1 | import random 2 | import datetime 3 | import itertools 4 | from scoop import futures 5 | import threading 6 | import timeit 7 | import uuid 8 | _uid = threading.local() 9 | def genuid(): 10 | if getattr(_uid, "uid", None) is None: 11 | _uid.tid = threading.current_thread().ident 12 | _uid.uid = 0 13 | _uid.uid += 1 14 | return (_uid.tid, _uid.uid) 15 | def uniqueid(): 16 | seed = random.getrandbits(12) 17 | while True: 18 | yield seed 19 | seed += 1 20 | def __uniqueid__(): 21 | """ 22 | generate unique id with length 17 to 21. 23 | ensure uniqueness even with daylight savings events (clocks adjusted one-hour backward). 24 | 25 | if you generate 1 million ids per second during 100 years, you will generate 26 | 2*25 (approx sec per year) * 10**6 (1 million id per sec) * 100 (years) = 5 * 10**9 unique ids. 27 | 28 | with 17 digits (radix 16) id, you can represent 16**17 = 295147905179352825856 ids (around 2.9 * 10**20). 29 | In fact, as we need far less than that, we agree that the format used to represent id (seed + timestamp reversed) 30 | do not cover all numbers that could be represented with 35 digits (radix 16). 31 | 32 | if you generate 1 million id per second with this algorithm, it will increase the seed by less than 2**12 per hour 33 | so if a DST occurs and backward one hour, we need to ensure to generate unique id for twice times for the same period. 34 | the seed must be at least 1 to 2**13 range. if we want to ensure uniqueness for two hours (100% contingency), we need 35 | a seed for 1 to 2**14 range. that's what we have with this algorithm. You have to increment seed_range_bits if you 36 | move your machine by airplane to another time zone or if you have a glucky wallet and use a computer that can generate 37 | more than 1 million ids per second. 38 | 39 | one word about predictability : This algorithm is absolutely NOT designed to generate unpredictable unique id. 40 | you can add a sha-1 or sha-256 digest step at the end of this algorithm but you will loose uniqueness and enter to collision probability world. 41 | hash algorithms ensure that for same id generated here, you will have the same hash but for two differents id (a pair of ids), it is 42 | possible to have the same hash with a very little probability. You would certainly take an option on a bijective function that maps 43 | 35 digits (or more) number to 35 digits (or more) number based on cipher block and secret key. read paper on breaking PRNG algorithms 44 | in order to be convinced that problems could occur as soon as you use random library :) 45 | 46 | 1 million id per second ?... on a Intel(R) Core(TM)2 CPU 6400 @ 2.13GHz, you get : 47 | 48 | >>> timeit.timeit(uniqueid,number=40000) 49 | 1.0114529132843018 50 | 51 | an average of 40000 id/second 52 | """ 53 | mynow=datetime.datetime.now 54 | sft=datetime.datetime.strftime 55 | # store old datetime each time in order to check if we generate during same microsecond (glucky wallet !) 
56 | # or if daylight savings event occurs (when clocks are adjusted backward) [rarely detected at this level] 57 | old_time=mynow() # fake init - on very speed machine it could increase your seed to seed + 1... but we have our contingency :) 58 | # manage seed 59 | seed_range_bits=8 # max range for seed 60 | seed_max_value=2**seed_range_bits - 1 # seed could not exceed 2**nbbits - 1 61 | # get random seed 62 | seed=random.getrandbits(seed_range_bits) 63 | current_seed=str(seed) 64 | # producing new ids 65 | while True: 66 | # get current time 67 | current_time=mynow() 68 | if current_time <= old_time: 69 | # previous id generated in the same microsecond or Daylight saving time event occurs (when clocks are adjusted backward) 70 | seed = max(1,(seed + 1) % seed_max_value) 71 | current_seed=str(seed) 72 | # generate new id (concatenate seed and timestamp as numbers) 73 | #newid=hex(int(''.join([sft(current_time,'%f%S%M%H%d%m%Y'),current_seed])))[2:-1] 74 | newid=int(''.join([sft(current_time,'%f%S%M%H%d%m%Y'),current_seed])) 75 | # save current time 76 | old_time=current_time 77 | # return a new id 78 | yield newid 79 | def __uniqueid2__(): 80 | id = int(str(uuid.uuid4().fields[-1])[:8]) 81 | while True: 82 | yield id 83 | id += 1 84 | 85 | def appendvalues(u): 86 | #return int(str(uuid.uuid4().fields[-1])[:8]) 87 | return __uniqueid2__().next() 88 | 89 | 90 | if __name__ == '__main__': 91 | # unique_sequence = uniqueid() 92 | # id1 = next(unique_sequence) 93 | # id2 = next(unique_sequence) 94 | # id3 = next(unique_sequence) 95 | # ids = list(itertools.islice(unique_sequence, 1000)) 96 | # print (id1,id2,id3) 97 | # print (ids) 98 | # print (genuid()) 99 | """ you get a new id for each call of uniqueid() """ 100 | # id2 = [] 101 | # for i in range(100000): 102 | # id2.append(__uniqueid__().next()) 103 | # print (len(id2)) 104 | # list(set(id2)) 105 | # print (len(id2)) 106 | results = [] 107 | results = list(futures.map(appendvalues, range(2000))) 108 | print (len(results)) 109 | list(set(results)) 110 | print (len(results)) 111 | # for pid, result in enumerate(results): 112 | # print (pid, result - results[0]) 113 | --------------------------------------------------------------------------------
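# --- annotation -----------------------------------------------------------
# In the __main__ block of uniqueID_scoop.py above, ``list(set(results))`` builds a new,
# deduplicated list and immediately discards it, so the second ``len(results)`` always
# equals the first and duplicates would go unnoticed. A minimal sketch of what that
# check presumably intends (assuming the goal is to detect duplicate ids):
def count_duplicates(values):
    """Return how many entries in values repeat an earlier entry."""
    return len(values) - len(set(values))
# usage, e.g.: print(count_duplicates(results))
# ---------------------------------------------------------------------------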