├── VERSION ├── janus ├── tests │ ├── __init__.py │ ├── data │ │ ├── shp │ │ │ ├── counties_srb.cpg │ │ │ ├── counties_srb.shp │ │ │ ├── counties_srb.shx │ │ │ ├── counties_srb.prj │ │ │ ├── counties_srb.qpj │ │ │ └── counties_srb.dbf │ │ ├── gcam_2010_domain_3000.tiff │ │ ├── comp_outputs │ │ │ ├── domain_3000m_20yr.npy │ │ │ ├── profits_3000m_20yr.npy │ │ │ └── landcover_3000m_20yr.npy │ │ ├── GenerateSyntheticPrices_test_input.csv │ │ ├── config.yml │ │ ├── GenerateSyntheticPrices_test_output.csv │ │ └── CDL2GCAM_categories.csv │ ├── test_nass_agent_data.py │ ├── test_model.py │ └── test_crop_decider.py ├── agents │ ├── __init__.py │ ├── urban.py │ ├── farmer.py │ └── d_cell.py ├── crop_functions │ ├── __init__.py │ └── crop_decider.py ├── postprocessing │ ├── __init__.py │ └── create_figures.py ├── preprocessing │ ├── __init__.py │ ├── geofxns.py │ ├── get_gis_data.py │ ├── convert_gcam_usa_prices.py │ ├── get_nass_agent_data.py │ ├── landcover_preprocessing.py │ └── generate_synthetic_prices.py ├── __init__.py ├── install_supplement.py ├── config_reader.py ├── initialize_agents_domain.py └── model.py ├── MANIFEST.in ├── requirements.txt ├── .gitignore ├── example ├── example.py ├── switch_explore.py └── config.yml ├── .travis.yml ├── LICENSE ├── setup.py └── README.md /VERSION: -------------------------------------------------------------------------------- 1 | 0.0.0 2 | -------------------------------------------------------------------------------- /janus/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /janus/agents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /janus/crop_functions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /janus/postprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /janus/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /janus/tests/data/shp/counties_srb.cpg: -------------------------------------------------------------------------------- 1 | UTF-8 -------------------------------------------------------------------------------- /janus/tests/data/shp/counties_srb.shp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LEAF-BoiseState/janus/HEAD/janus/tests/data/shp/counties_srb.shp -------------------------------------------------------------------------------- /janus/tests/data/shp/counties_srb.shx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LEAF-BoiseState/janus/HEAD/janus/tests/data/shp/counties_srb.shx -------------------------------------------------------------------------------- /janus/tests/data/gcam_2010_domain_3000.tiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LEAF-BoiseState/janus/HEAD/janus/tests/data/gcam_2010_domain_3000.tiff 
-------------------------------------------------------------------------------- /janus/tests/data/comp_outputs/domain_3000m_20yr.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LEAF-BoiseState/janus/HEAD/janus/tests/data/comp_outputs/domain_3000m_20yr.npy -------------------------------------------------------------------------------- /janus/tests/data/comp_outputs/profits_3000m_20yr.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LEAF-BoiseState/janus/HEAD/janus/tests/data/comp_outputs/profits_3000m_20yr.npy -------------------------------------------------------------------------------- /janus/tests/data/comp_outputs/landcover_3000m_20yr.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LEAF-BoiseState/janus/HEAD/janus/tests/data/comp_outputs/landcover_3000m_20yr.npy -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include requirements.txt 3 | include LICENSE 4 | include janus/tests/data/config.yml 5 | include janus/tests/data/*.csv 6 | include janus/tests/data/shp/*.* 7 | include janus/tests/data/*.tiff 8 | include janus/tests/data/comp_outputs/*.npy 9 | -------------------------------------------------------------------------------- /janus/__init__.py: -------------------------------------------------------------------------------- 1 | from janus.model import * 2 | from janus.install_supplement import * 3 | from janus.preprocessing.get_gis_data import get_gis_data 4 | from janus.preprocessing.convert_gcam_usa_prices import gcam_usa_price_converter 5 | 6 | __all__ = ['Janus', 'InstallSupplement', 'get_gis_data', 'gcam_usa_price_converter'] 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.11 2 | scipy>=0.18 3 | matplotlib>=1.3.1,<4.0 4 | pandas>=0.19 5 | geopandas>=0.4.0 6 | setuptools>=24.2.0 7 | rasterstats>=0.13.0 8 | joblib>=0.11 9 | rasterio>=1.0.8 10 | pycrs>=1.0.1 11 | shapely>=1.6.1 12 | nass>=0.1.1 13 | fiona>=1.7.13 14 | pyyaml>=3.12 15 | seaborn>=0.10 16 | netCDF4>=1.5.4 17 | -------------------------------------------------------------------------------- /janus/tests/data/shp/counties_srb.prj: -------------------------------------------------------------------------------- 1 | PROJCS["WGS_1984_UTM_Zone_11N",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # raster files 2 | *.asc 3 | 4 | /Data/* 5 | /GCAM_SRP/*.* 6 | /GCAM_SRP/**/*.* 7 | abm/postprocessing/__pycache__/*.pyc 8 | */__pycache__/* 9 | *.zip 10 | */*/__pycache__/* 11 | *example/example.py 12 | 13 | # OS-specific files 14 | .DS_Store 15 | 16 | # python pyc files 17 | *.pyc 18 | 19 | # exclude .idea dir 20 | .idea 21 | 22 | # 
ignore .mat files 23 | *.mat 24 | 25 | # log files 26 | *.log 27 | 28 | # ignore build 29 | build 30 | dist 31 | janus.egg-info 32 | -------------------------------------------------------------------------------- /janus/tests/test_nass_agent_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Aug 21 14:08:22 2019 3 | @author: kendrakaiser 4 | """ 5 | 6 | import unittest 7 | import janus.preprocessing.get_nass_agent_data as nass 8 | 9 | 10 | class TestNassAgent(unittest.TestCase): 11 | 12 | def test_cleanup(self): 13 | test_val = nass.cleanup('2,400') 14 | 15 | known_val = int(2400) 16 | 17 | self.assertEqual(test_val, known_val) 18 | 19 | 20 | if __name__ == '__main__': 21 | unittest.main() -------------------------------------------------------------------------------- /example/example.py: -------------------------------------------------------------------------------- 1 | import janus 2 | 3 | 4 | def run_example(f): 5 | """Sample function to run example. 6 | 7 | :param f: Full path with file name and extension to a config.yml file 8 | 9 | :return: Model executes, returns Janus class attributes 10 | 11 | """ 12 | return janus.Janus(config_file=f) 13 | 14 | 15 | if __name__ == '__main__': 16 | 17 | config_file = '' 18 | 19 | janus_run = run_example(config_file) 20 | -------------------------------------------------------------------------------- /janus/tests/data/shp/counties_srb.qpj: -------------------------------------------------------------------------------- 1 | PROJCS["WGS 84 / UTM zone 11N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32611"]] 2 | -------------------------------------------------------------------------------- /janus/tests/test_model.py: -------------------------------------------------------------------------------- 1 | """test_builder.py 2 | 3 | Tests for model interface 4 | 5 | @license BSD 2-Clause 6 | 7 | """ 8 | 9 | import pkg_resources 10 | import unittest 11 | 12 | from janus import Janus 13 | 14 | 15 | class TestBuilder(unittest.TestCase): 16 | """Test BuildStaffWorkbooks attributes.""" 17 | 18 | DEFAULT_CONFIG_FILE = pkg_resources.resource_filename('janus', 'tests/data/config.yml') 19 | 20 | def test_n(self): 21 | """Check the number of months in the working hours file derived list.""" 22 | 23 | # n_months = len(TestBuilder.TEST_READ_OBJ.wkg_hrs_list) 24 | # 25 | # self.assertEqual(n_months, 12) 26 | 27 | pass 28 | -------------------------------------------------------------------------------- /janus/agents/urban.py: -------------------------------------------------------------------------------- 1 | # Author: Kendra Kaiser 2 | # Date: 8/13/2019 3 | # FileName: urban.py 4 | # Purpose: Holds definition of urban agent 5 | 6 | 7 | class Urban: 8 | """ Urban agents exist in locations where the land cover is urban, the class contains their attributes. 9 | The urban agent is currently not in use in Janus, but is a placeholder for future development. 
10 | :param density: Density of urban location, high, medium, or low 11 | :type density: String 12 | """ 13 | 14 | def __init__(self, **kwargs): 15 | 16 | self.density = kwargs.get('density') 17 | 18 | assert(self.density == 0 or self.density == 1 or self.density == 2) 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | matrix: 3 | include: 4 | - python: 3.6 5 | dist: xenial 6 | warnings_are_errors: false 7 | sudo: required 8 | addons: 9 | apt: 10 | sources: 11 | - sourceline: 'ppa:ubuntugis/ubuntugis-unstable' 12 | packages: 13 | - libudunits2-dev 14 | - libproj-dev 15 | - libgeos-dev 16 | - gdal-bin 17 | - libgdal-dev 18 | cache: pip 19 | install: 20 | - pip install GDAL==$(gdal-config --version) --global-option=build_ext --global-option="-I/usr/include/gdal" 21 | - pip install -r requirements.txt 22 | - python setup.py -q install 23 | - pip install coverage 24 | script: 25 | - travis_wait 35 coverage run -m unittest discover 26 | after_success: 27 | - bash <(curl -s https://codecov.io/bash) 28 | -------------------------------------------------------------------------------- /janus/tests/data/GenerateSyntheticPrices_test_input.csv: -------------------------------------------------------------------------------- 1 | Corn,1,1,200000,90000,1,5000 2 | Wheat,2,3,100000,20000,4,1,1000 3 | Dry Beans,3,3,80000,10000,4,1,100 4 | Root_Tuber,4,1,180000,220000,1,5000 5 | OilCrop,5,2,150000,225000,0.3,1,1000 6 | SugarCrop,6,1,190000,230000,1,10000 7 | OtherGrain,7,1,160000,190000,1,10000 8 | Onions,8,2,195000,205000,0.5,1,1000 9 | FodderGrass,9,1,160000,190000,1,10000 10 | FodderHerb,10,1,180000,200000,1,25000 11 | Peas,11,3,165000,40000,4,1,3000 12 | MiscCrop,12,2,150000,250000,0.8,1,5000 13 | OtherArableLand,13,3,165000,10000,4,1,100 14 | Sod and Grass Seeds,14,1,175000,210000,1,10000 15 | Pasture,15,3,130000,200000,0.75,1,10000 16 | Hops,16,1,180000,220000,1,500 17 | Stone-Pome Fruit,18,2,190000,220000,0.4,1,20000 18 | Grapes,20,1,190000,220000,1,1000 19 | Mint,22,1,170000,190000,1,10000 -------------------------------------------------------------------------------- /example/switch_explore.py: -------------------------------------------------------------------------------- 1 | import janus.crop_functions.crop_decider as crpdec 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | s1 = crpdec.switching_prob_curve(4.5, 0.5, 1, 1.5, 100, 2.5) # switching averse 6 | s2 = crpdec.switching_prob_curve(0.25, 4.0, 1, 1.5, 100, 2.5) # switching tolerant 7 | s3 = crpdec.switching_prob_curve(2, 2, 1, 1.5, 100, 1000) # switching neutral 8 | s5 = crpdec.switching_prob_curve(7, 1.5, 1, 1.5, 100, 1000) # aging neutral 9 | s4 = crpdec.switching_prob_curve(9.5, 0.5, 1, 1.5, 100, 1000) # aging averse 10 | 11 | fig = plt.figure() 12 | ax = plt.axes() 13 | 14 | ax.plot(s1[0], s1[1], label='switching averse') 15 | ax.plot(s2[0], s2[1], label='switching tolerant') 16 | 17 | ax.plot(s3[0], s3[1], label='switching neutral') 18 | ax.plot(s4[0], s4[1], label='aging averse') 19 | ax.plot(s5[0], s5[1], label='aging neutral') 20 | 21 | plt.legend() 22 | plt.show() -------------------------------------------------------------------------------- /janus/preprocessing/geofxns.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Thu May 30 15:09:10 2019 5 | 6 | @author: kek25 7 | 8 | 
Library of functions for geospatial processing 9 | """ 10 | 11 | import numpy as np 12 | from scipy import spatial 13 | 14 | def min_dist_city(gcam): 15 | """Calculate the minimum distance to a city cell. 16 | 17 | :param gcam: np.array of land cover of Snake River Basin GCAM categories, other key files will incorrectly identify city cells 18 | 19 | :return: np.array of distance to a city cell within the domain 20 | 21 | """ 22 | # TODO: update to based on key file 23 | urban_bool = np.logical_or(np.logical_or(gcam == 26, gcam == 27), np.logical_or(gcam == 17, gcam == 25)) 24 | 25 | rur = np.where(np.logical_and(~urban_bool, gcam != 0)) 26 | rural = np.array((rur[0], rur[1])).transpose() 27 | 28 | urb = np.where(urban_bool) 29 | urban = np.array((urb[0], urb[1])).transpose() 30 | 31 | tree = spatial.cKDTree(urban) 32 | mindist, minid = tree.query(rural) 33 | 34 | # reconstruct 2D np array with distance values 35 | urb_val = np.zeros(urban.shape[0]) 36 | idx = np.vstack((urban, rural)) 37 | dist = np.vstack((urb_val[:, None], mindist[:, None])) 38 | out = np.zeros(gcam.shape) 39 | out.fill(np.nan) 40 | 41 | for i in np.arange(dist.size): 42 | out[idx[i, 0]][idx[i, 1]] = dist[i] 43 | 44 | return out 45 | -------------------------------------------------------------------------------- /janus/install_supplement.py: -------------------------------------------------------------------------------- 1 | """ 2 | Install archived supplement. 3 | 4 | @author Chris R. Vernon 5 | @email: chris.vernon@pnnl.gov 6 | 7 | License: BSD 2-Clause, see LICENSE and DISCLAIMER files 8 | 9 | """ 10 | 11 | import argparse 12 | import pkg_resources 13 | 14 | from distutils.dir_util import copy_tree 15 | 16 | 17 | class InstallSupplement: 18 | """Transfer tests data to a directory of the users choosing to execute model. 19 | 20 | :param example_data_directory: Full path to the directory you wish to install 21 | the example data to. Must be write-enabled 22 | for the user. 23 | 24 | """ 25 | 26 | DATA_DIR = pkg_resources.resource_filename('janus', 'tests/data') 27 | 28 | def __init__(self, example_data_directory): 29 | 30 | # full path to the root directory where the example dir will be stored 31 | self.example_data_directory = example_data_directory 32 | 33 | copy_tree(InstallSupplement.DATA_DIR, self.example_data_directory) 34 | 35 | 36 | if __name__ == "__main__": 37 | 38 | parser = argparse.ArgumentParser() 39 | help_msg = 'Full path to the directory you wish to install the example data to.' 40 | parser.add_argument('example_data_directory', type=str, help=help_msg) 41 | args = parser.parse_args() 42 | 43 | zen = InstallSupplement(args.example_data_directory) 44 | del zen 45 | -------------------------------------------------------------------------------- /janus/agents/farmer.py: -------------------------------------------------------------------------------- 1 | # Author: Jonathan Carvajal 2 | # Date: 8/13/2019 3 | # FileName: farmer.py 4 | # Purpose: Holds definition of farmer 5 | 6 | 7 | class Farmer: 8 | """ The farmer class holds all relevant information about farmer agents. All attributes are optional. 9 | :param Age: Age of agent 10 | :type Age: Int 11 | :param Dist2city: Distance from agents location to the nearest city cell 12 | :type Dist2city: Float 13 | :param LandStatus: Farmers ownership status, e.g. 
tenured, owner, part owner 14 | :type LandStatus: String 15 | :param nFields: Number of fields that the farmer is managing 16 | :type nFields: Int 17 | :param alpha: Alpha parameter for the incomplete beta distribution 18 | :type alpha: Float 19 | :param beta: Beta parameter for the incomplete beta distribution 20 | :type beta: Float 21 | 22 | """ 23 | def __init__(self, **kwargs): 24 | self.Age = kwargs.get('Age') 25 | self.Dist2city = kwargs.get('Dist2city') 26 | 27 | self.LandStatus = kwargs.get('LandStatus') 28 | self.nFields = kwargs.get('nFields') 29 | 30 | self.alpha = kwargs.get('alpha') 31 | self.beta = kwargs.get('beta') 32 | 33 | def update_age(self): 34 | """ Updates the age of the agent by one year """ 35 | self.Age += 1 36 | 37 | def update_dist2city(self, new_dist): 38 | """ Updates the distance of the farmer to the city given new land cover""" 39 | self.Dist2city = new_dist 40 | 41 | def update_switch(self): 42 | """ Updates the farmers likelihood of switching their crop such that they are less likely to switch as they age 43 | """ 44 | self.alpha += 0.1 45 | self.beta -= 0.01 46 | -------------------------------------------------------------------------------- /janus/tests/data/config.yml: -------------------------------------------------------------------------------- 1 | # Configuration file for the ABM land use land cover change agent based model 2 | 3 | # full path with file name and extension to the counties shapefile 4 | f_counties_shp: 'janus/tests/data/shp/counties_srb.shp' 5 | 6 | # full path with file name and extension to the land class category key file 7 | f_key_file: 'janus/tests/data/CDL2GCAM_categories.csv' 8 | 9 | # gcam raster 10 | f_gcam_file: 'janus/tests/data/gcam_2010_domain_3000.tiff' 11 | 12 | # profits file 13 | f_profits_file: 'janus/tests/data/GenerateSyntheticPrices_test_output.csv' 14 | 15 | # output directory 16 | output_directory: None 17 | 18 | # number of time steps 19 | nt: 20 20 | 21 | # list of lists for switching averse, tolerant parameters (alpha, beta) 22 | switch_params: [[4.5, 1.0], [0.5, 3.0]] 23 | 24 | # proportion of each switching type, lower than p is averse, higher is tolerant 25 | p: 0.5 26 | 27 | # fraction of current profit at which the CDF is zero and one, and number of points to generate 28 | fmin: 1.0 29 | fmax: 1.5 30 | n: 100 31 | 32 | # TODO: Set seed for test data 33 | # Seed for random number generator 34 | crop_seed_size: 5 35 | 36 | # initialization year 37 | initialization_yr: 2010 38 | 39 | # scale of grid in meters 40 | scale: 3000 41 | 42 | # list of counties to evaluate 43 | county_list: ['Ada', 'Canyon'] 44 | 45 | # Currently available agent attributes, tenure status and area operated 46 | agent_variables: ["TENURE", "AREA OPERATED"] 47 | 48 | # NASS years that are available are 2007, 2012 49 | nass_year: 2007 50 | 51 | # state where NASS data is pulled from, as capatalized acronym 52 | state: 'ID' 53 | 54 | # List of counties used to gather NASS data, must be capitalized 55 | nass_county_list: ['ADA', 'CANYON'] 56 | 57 | # NASS API key 58 | nass_api_key: 'B5240598-2A7D-38EE-BF8D-816A27BEF504' -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause 2 | 3 | Copyright © 2019 4 | All rights reserved. 5 | 6 | 1. 
We, the repository owners, hereby grant permission to any person or entity lawfully
7 | obtaining a copy of this software and associated documentation files (hereinafter “the Software”) to redistribute and
8 | use the Software in source and binary forms, with or without modification. Such person or entity may use, copy, modify,
9 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and may permit others to do so, subject to
10 | the following conditions:
11 | 
12 | Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.
13 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
14 | disclaimer in the documentation and/or other materials provided with the distribution.
15 | Other than as used herein, neither of our names may be used in any form whatsoever
16 | without the express written consent of the repository owners.
17 | 
18 | 
19 | 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | DISCLAIMED. IN NO EVENT SHALL THE REPOSITORY OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 | 
4 | try:
5 |     from setuptools import setup, find_packages
6 | except ImportError:
7 |     raise ImportError("Must have `setuptools` installed to run setup.py. Please install and try again.")
8 | 
9 | 
10 | def readme():
11 |     with open('README.md') as f:
12 |         return f.read().strip()
13 | 
14 | 
15 | def get_requirements():
16 |     with open('requirements.txt') as f:
17 |         return f.read().split()
18 | 
19 | def version():
20 |     with open('VERSION') as f:
21 |         return f.read().strip()
22 | 
23 | try:
24 |     import gdal
25 | 
26 | except ImportError:
27 | 
28 |     # get gdal version on machine
29 |     gdal_sys_call = subprocess.Popen('gdal-config --version', stdout=subprocess.PIPE, shell=True)
30 |     gdal_system_version = gdal_sys_call.stdout.read().decode('UTF-8').strip()
31 | 
32 |     gdal_split = gdal_system_version.split('.')
33 |     gdal_major = int(gdal_split[0])
34 |     gdal_minor = int(gdal_split[1])
35 | 
36 |     if (gdal_system_version != '') and (gdal_major >= 2) and (gdal_minor >= 1):
37 | 
38 |         # install gdal version matching gdal libs on machine
39 |         subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gdal=={}'.format(gdal_system_version)])
40 | 
41 |     else:
42 | 
43 |         raise ImportError('GDAL version >= 2.1.0 required to run Janus. 
Please install GDAL with Python bindings and retry.') 44 | 45 | setup( 46 | name='janus', 47 | version=version(), 48 | packages=find_packages(), 49 | url='https://github.com/LEAF-BoiseState/janus.git', 50 | license='BSD 2-Clause', 51 | author='Kendra Kaiser; Lejo Flores', 52 | author_email='kendrakaiser@boisestate.edu; lejoflores@boisestate.edu', 53 | description='An agent based model to model land use', 54 | long_description=readme(), 55 | install_requires=get_requirements(), 56 | include_package_data=True, 57 | python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4' 58 | ) 59 | -------------------------------------------------------------------------------- /example/config.yml: -------------------------------------------------------------------------------- 1 | 2 | # Configuration file for Janus, an agent based model of land use and land cover change 3 | 4 | # directory for all initialization files 5 | f_input_dir: '' 6 | 7 | # raster of the initial land cover data 8 | f_init_lc_file: '' 9 | 10 | #profit choice flag, gcam or generated 11 | profits: 'gcam' 12 | 13 | # TODO have these run from within this script, taking in nt and setting nc 14 | # profits file 15 | f_profits_file: '' 16 | f_gcam_profits_file: '' 17 | 18 | # full path with file name and extension to the land class category key file 19 | f_key_file: '' 20 | 21 | # output directory 22 | output_directory: '' 23 | 24 | output_file: '' 25 | 26 | # number of time steps 27 | nt: 30 28 | 29 | # list of lists for switching averse, tolerant, and neutral parameters (alpha, beta) 30 | switch_params: [[4.5, 0.5], [0.25, 4.0], [2.0, 2.0]] 31 | 32 | # boolean of whether to base switching parameters on farmer attributes or not 33 | attr: False 34 | 35 | # proportion of each switching type (0.95 = switching tolerant) 36 | p: 0.95 37 | 38 | # fraction of current profit at which the CDF is zero and one, and number of points to generate 39 | fmin: 1.0 40 | fmax: 1.5 41 | n: 100 42 | 43 | # TODO: Set seed for test data 44 | # Seed for random number generator 45 | crop_seed_size: 5 46 | 47 | # initialization year 48 | initialization_yr: 2010 49 | 50 | # scale of grid in meters 51 | scale: 3000 52 | 53 | # Currently available agent attributes, tenure status and area operated 54 | agent_variables: ["TENURE", "AREA OPERATED"] 55 | 56 | # NASS years that are available are 2007, 2012 57 | nass_year: 2007 58 | 59 | # state where NASS data is pulled from, as capitalized acronym 60 | state: 'ID' 61 | 62 | #list of counties for spatial sub setting 63 | county_list: ['Ada', 'Canyon'] 64 | 65 | # List of counties used to gather NASS data, must be capitalized 66 | nass_county_list: ['ADA', 'CANYON'] 67 | 68 | # NASS API key 69 | nass_api_key: 'B5240598-2A7D-38EE-BF8D-816A27BEF504' 70 | -------------------------------------------------------------------------------- /janus/tests/data/shp/counties_srb.dbf: -------------------------------------------------------------------------------- 1 | w 2 | +a<COUNTY_ALLN countyC2 1Humboldt 2Gem 3Baker 4Grant 5Malheur 6Adams 7Union 8Harney 9Elko 10Payette 11Owyhee 12Washington 13Canyon 14FremontWY 15LincolnWY 16TetonWY 17Sublette 18Ada 19Bannok 20Bingham 21Blaine 22Boise 23Bonneville 24Butte 25Camas 26Caribou 27Cassia 28Clark 29Custer 30Elmore 31Fremont 32Gooding 33Jefferson 34Jerome 35Lemhi 36Lincoln 37Madison 38Minidoka 39Oneida 40Power 41Teton 42Twin Falls 43Valley  -------------------------------------------------------------------------------- /janus/agents/d_cell.py: 
-------------------------------------------------------------------------------- 1 | import janus.agents.urban as urban_class 2 | import janus.agents.farmer as farmer_class 3 | 4 | 5 | class Dcell: 6 | """A class that contains both static and dynamic information about the simulation domain 7 | 8 | :param Area: The area of the cell 9 | :param cLat: The latitude of the cell center 10 | :param cLon: The longitude of the cell center 11 | :param Elev: The elevation of the cell 12 | :param Slope: The slope of the cell 13 | :param Aspect: The aspect of the cell 14 | :param perSand: The soil percent sand 15 | :param perSilt: The soil percent silt 16 | :param perClay: The soil percent clay 17 | :param nUrbAgent: The number of urban agents in the cell (initialized to 0) 18 | :type nUrbAgents: Int 19 | :param nFarmAgent: The number of farmer agents in the cell (initialized to 0) 20 | :type nFarmAgents: Int 21 | :param UrbanAgents: An empty container in which to store UrbanAgent class types (initialized as empty) 22 | :param FarmerAgents: An empty container in which to store FarmerAgent class types (initialized as empty) 23 | 24 | :return: A pointer to a Dcell object with the above attributes. 25 | 26 | """ 27 | def __init__(self, **kwargs): 28 | self.Area = kwargs.get('Area') 29 | self.cLat = kwargs.get('cLat') 30 | self.cLon = kwargs.get('cLon') 31 | self.Elev = kwargs.get('Elev') 32 | self.Slope = kwargs.get('Slope') 33 | self.Aspect = kwargs.get('Aspect') 34 | self.perSand = kwargs.get('perSand') 35 | self.perSilt = kwargs.get('perSilt') 36 | self.perClay = kwargs.get('perClay') 37 | self.nUrbAgent = 0 38 | self.nFarmAgent = 0 39 | self.UrbanAgents = [] 40 | self.FarmerAgents = [] 41 | 42 | def add_agent(self, agent_struct): 43 | """Adds a new agent to a list 44 | pre: agentStruct has been instantiated with init.Agents and is a valid agent type 45 | post: agent has been added to an array 46 | 47 | :param agent_struct: a structure of agent type that will be added to the Dcell 48 | :type agent_struct: Class 49 | 50 | :return: null 51 | 52 | """ 53 | agent_type = type(agent_struct).__name__ 54 | 55 | if agent_type == urban_class.Urban.__name__: 56 | self.nUrbAgent += 1 57 | self.UrbanAgents.append(agent_struct) 58 | 59 | if agent_type == farmer_class.Farmer.__name__: 60 | self.nFarmAgent += 1 61 | self.FarmerAgents.append(agent_struct) 62 | -------------------------------------------------------------------------------- /janus/tests/test_crop_decider.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Wed Aug 14 15:52:44 2019 3 | 4 | @author: kendrakaiser 5 | """ 6 | 7 | import unittest 8 | import numpy as np 9 | import janus.crop_functions.crop_decider as cd 10 | 11 | 12 | class CropDeciderTest(unittest.TestCase): 13 | 14 | def test_switchingProbCurve(self): 15 | alpha=2 16 | beta=2 17 | fmin=0 18 | fmax=10 19 | n=5 20 | profit=1000 21 | 22 | x_known= np.array([0,2500,5000,7500,10000]) 23 | f_known= np.array([0, 0.15625, 0.5, 0.84375, 1]) 24 | 25 | x_test, f_test = cd.switching_prob_curve(alpha,beta,fmin,fmax,n,profit) 26 | 27 | self.assertEqual(x_known.all(), x_test.all()) 28 | self.assertEqual(f_known.all(), f_test.all()) 29 | 30 | def test_decide(self): 31 | 32 | alpha=2 33 | beta=2 34 | fmin=0 35 | fmax=10 36 | n=5 37 | profit=1000 38 | profit_p=1050 39 | 40 | ans = 0 41 | ans_test = cd.decide2switch(alpha,beta,fmin,fmax,n,profit,profit_p) 42 | 43 | self.assertEqual(ans, ans_test) 44 | 45 | def test_assessProfit(self): 46 | Crop = 
np.float64(15) 47 | Profits_cur =np.array([33335, 15559, 27343, 12477]) 48 | Profits_alt = np.array([31114, 15964, 27966, 14310]) 49 | Nc= 4 50 | CropIDs =np.array([1,2,3,10]) 51 | 52 | Profit_ant_test, Profit_p_test = cd.assess_profit(Crop, Profits_cur, Profits_alt, Nc, CropIDs) 53 | 54 | Profit_ant_known=np.array([0]) 55 | Profit_p_known=np.zeros([4,1]) 56 | 57 | self.assertEqual(Profit_ant_test, Profit_ant_known) 58 | self.assertEqual(Profit_p_test.all(), Profit_p_known.all()) 59 | 60 | def test_decideN(self): 61 | 62 | alpha=2 63 | beta=2 64 | fmin=0 65 | fmax=10 66 | n=5 67 | profit=1000 68 | vec_crops=np.array([1,2,3,10]) 69 | vec_crops=vec_crops.reshape((4,1)) 70 | vec_profit_p=np.zeros([4,1]) 71 | 72 | CropChoice_test, ProfitChoice_test = cd.profit_maximizer(alpha, beta, fmin, fmax, n, profit, vec_crops, \ 73 | vec_profit_p, rule=True) 74 | 75 | CropChoice_known = -1 76 | ProfitChoice_known = -1 77 | 78 | self.assertEqual(CropChoice_test, CropChoice_known) 79 | self.assertEqual(ProfitChoice_test, ProfitChoice_known) 80 | 81 | def test_MakeChoice(self): 82 | cd.define_seed(5) 83 | 84 | CropID_all=np.float64(15) 85 | Profit_last=0 86 | CropChoice=-1 87 | ProfitChoice=-1 88 | 89 | CropID_all_known=np.float(15.0) 90 | Profit_act_known =np.array([[[441.22748689]]]) 91 | 92 | CropID_all_test, Profit_act_test = cd.make_choice(CropID_all, Profit_last, CropChoice, ProfitChoice, seed=True) 93 | 94 | self.assertEqual(CropID_all_known, CropID_all_test) 95 | self.assertEqual(Profit_act_known.astype('int'), Profit_act_test.astype('int')) 96 | 97 | 98 | if __name__ == '__main__': 99 | unittest.main() 100 | -------------------------------------------------------------------------------- /janus/tests/data/GenerateSyntheticPrices_test_output.csv: -------------------------------------------------------------------------------- 1 | 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18,20,22 2 | 194464.94,101197.51,79976.16,174615.87,150164.27,185391.35,143108.03,195429.77,163218.64,178978.06,162026.90,150794.53,165291.97,169868.95,134336.17,179934.20,162799.39,189637.36,153712.81 3 | 189257.13,118620.37,89776.06,178112.17,150056.52,181089.39,160443.41,195041.52,167761.71,160412.09,203271.26,146830.58,174719.23,174105.22,190759.41,181925.71,174679.78,191454.36,175486.53 4 | 189971.94,109890.63,84753.24,177121.06,150453.14,192249.53,172038.26,196055.66,165371.21,175519.37,183642.33,138308.50,169846.53,196106.80,218556.09,184122.13,207306.09,191897.48,172669.88 5 | 183247.31,84718.09,72688.57,184806.01,147828.61,188626.08,133232.05,196190.37,174261.31,154773.36,136888.52,148465.89,157604.36,181604.60,259553.02,185944.17,204519.92,194644.43,149636.25 6 | 174529.22,82113.03,71679.29,180624.64,151073.12,183403.79,165041.30,193948.01,165801.51,186904.34,133306.99,143001.28,156785.63,183113.43,296774.32,188257.15,178869.63,195776.19,185098.38 7 | 171681.53,105680.49,83180.54,185997.20,150776.75,210097.87,152698.60,194044.99,165346.00,187824.89,184526.98,145789.86,168280.31,186706.63,327056.50,191171.16,172600.41,197718.15,180850.51 8 | 163359.34,120576.49,89933.34,193993.28,225443.95,206176.45,188320.03,194929.45,165252.81,215043.44,205126.90,154900.55,174951.90,202161.75,323295.90,193223.78,187376.86,198483.32,167996.19 9 | 159527.04,104954.64,81673.81,191378.09,225856.72,206126.12,169641.63,195211.83,178486.13,157748.66,172436.88,153327.84,166699.31,190027.54,315982.11,194711.81,169024.07,200959.83,178611.45 10 | 
155073.56,82404.45,70628.82,194229.32,226538.02,212420.54,167584.77,194730.63,172640.37,122155.32,124964.65,155646.47,155857.22,196837.42,313489.77,196600.23,225992.35,201369.78,190560.15 11 | 147998.15,87758.45,73736.96,196480.36,227368.87,204667.08,161296.68,194776.21,168155.16,184123.57,142456.80,148527.75,158916.74,190339.88,306159.39,198666.11,239575.05,202890.47,179863.28 12 | 142163.91,111794.79,86162.15,202070.92,225515.87,193721.76,176830.75,205860.13,155153.29,143646.44,189616.38,155338.98,171134.09,206687.06,239477.64,201316.01,210841.28,206351.04,180578.79 13 | 139584.59,117415.53,89101.87,197705.74,224527.11,215149.06,187777.17,203406.26,171085.76,166484.53,203870.90,160750.12,174143.95,198059.05,217067.13,203529.59,247873.31,207464.95,185965.96 14 | 135291.42,95261.86,78384.05,201911.93,224328.83,225732.68,188836.34,203342.44,179432.45,211230.57,160089.37,146670.56,163190.98,202962.69,154566.66,205337.64,231896.98,210525.61,180546.76 15 | 124770.19,79515.91,70038.63,207718.39,226855.07,229741.89,187292.60,203085.85,193962.72,205494.58,127339.66,152825.15,154890.40,194299.51,134316.06,207170.02,204419.03,211686.39,187935.53 16 | 123133.67,93100.21,76853.72,207148.21,225064.93,233664.97,172479.69,204388.30,188592.59,180264.51,152833.42,155457.54,161898.78,189500.17,55787.37,208697.16,238519.38,211348.22,175587.84 17 | 109740.66,115847.89,88443.75,220385.53,225633.80,228103.53,183800.41,206475.93,195056.09,219254.36,196075.97,150115.58,173440.68,210279.76,18690.12,211516.34,229266.58,213159.32,173422.33 18 | 113143.76,112822.45,87199.82,209376.06,223967.95,221029.15,187242.42,205754.15,176224.05,166304.06,193206.90,246693.80,172379.66,221330.66,-10226.19,213533.29,186239.21,215382.11,188911.12 19 | 93775.86,89668.49,75312.03,221396.63,225084.98,224202.81,172135.71,204581.99,194533.96,243476.45,149522.39,249635.86,160124.33,223749.94,-66384.36,214873.54,203526.06,220425.63,188186.42 20 | 87256.88,80454.90,70106.40,214712.27,224203.43,229230.65,184210.72,204629.18,198880.08,206163.67,126520.69,256134.96,155369.66,214596.61,-58718.94,217486.44,188139.92,217012.33,187562.16 21 | 89677.95,98574.15,79951.73,214890.86,224383.45,237281.35,180357.94,205047.53,175135.56,197034.01,168246.05,255003.28,164873.54,203555.07,-45997.15,221225.53,212981.61,218518.90,188929.39 -------------------------------------------------------------------------------- /janus/config_reader.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import geopandas as gpd 4 | import yaml 5 | 6 | import janus.crop_functions.crop_decider as crpdec 7 | 8 | 9 | class ConfigReader: 10 | 11 | # keys found in the configuration file 12 | F_INIT_LC_FILE = 'f_init_lc_file' 13 | PROFITS = 'profits' 14 | F_PROFITS_FILE = 'f_profits_file' 15 | F_GCAM_PROFITS_FILE = 'f_gcam_profits_file' 16 | F_KEY_FILE = 'f_key_file' 17 | NT = 'nt' 18 | SWITCH_PARAMS = 'switch_params' 19 | ATTR = 'attr' 20 | P = 'p' 21 | FMIN = 'fmin' 22 | FMAX = 'fmax' 23 | N = 'n' 24 | CROP_SEED_SIZE = 'crop_seed_size' 25 | TARGET_YR = 'initialization_yr' 26 | SCALE = 'scale' 27 | AGENT_VARS = 'agent_variables' 28 | NASS_YR = 'nass_year' 29 | STATE = 'state' 30 | NASS_COUNTY_LIST = 'nass_county_list' 31 | NASS_API_KEY = 'nass_api_key' 32 | OUTPUT_DIR = 'output_directory' 33 | OUTPUT_FILE = 'output_file' 34 | 35 | # county field name in the input shapefile 36 | COUNTY_FLD = 'county' 37 | 38 | def __init__(self, config_file): 39 | 40 | c = self.read_yaml(config_file) 41 | 42 | 
self.f_init_lc_file = c[ConfigReader.F_INIT_LC_FILE] 43 | 44 | self.profits = c[ConfigReader.PROFITS] 45 | 46 | self.profits_file = pd.read_csv(c[ConfigReader.F_PROFITS_FILE], header=None) 47 | 48 | self.gcam_profits_file = pd.read_csv(c[ConfigReader.F_GCAM_PROFITS_FILE], header=0) 49 | 50 | self.key_file = pd.read_csv(c[ConfigReader.F_KEY_FILE]) 51 | 52 | self.output_dir = c[ConfigReader.OUTPUT_DIR] 53 | 54 | self.output_file = c[ConfigReader.OUTPUT_FILE] 55 | 56 | self.Nt = c[ConfigReader.NT] 57 | 58 | # set agent switching parameters (alpha, beta) [[switching averse], [switching tolerant]] 59 | self.switch = np.array(c[ConfigReader.SWITCH_PARAMS]) 60 | 61 | # boolean that sets whether to base switching parameters on age and tenure (True) or not 62 | self.attr = c[ConfigReader.ATTR] 63 | 64 | # proportion of each switching type, lower than p is averse, higher is tolerant 65 | self.p = c[ConfigReader.P] 66 | 67 | # fraction of current profit at which the CDF is zero and one, and number of points to generate 68 | self.fmin = c[ConfigReader.FMIN] 69 | self.fmax = c[ConfigReader.FMAX] 70 | self.n = c[ConfigReader.N] 71 | 72 | # TODO: define seed for crop decider; This is not used in this script but is set as `global` 73 | crpdec.define_seed(c[ConfigReader.CROP_SEED_SIZE]) 74 | 75 | # target year 76 | self.target_year = c[ConfigReader.TARGET_YR] 77 | 78 | # scale of grid in meters 79 | self.scale = c[ConfigReader.SCALE] 80 | 81 | # list of counties to pull NASS data from 82 | self.nass_county_list = c[ConfigReader.NASS_COUNTY_LIST] 83 | 84 | # agent variables 85 | self.agent_variables = c[ConfigReader.AGENT_VARS] 86 | 87 | # NASS year 88 | self.nass_year = c[ConfigReader.NASS_YR] 89 | 90 | # NASS state 91 | self.state = c[ConfigReader.STATE] 92 | 93 | # NASS county list 94 | self.nass_county_list = [i.upper() for i in c[ConfigReader.NASS_COUNTY_LIST]] 95 | 96 | # NASS API key 97 | self.nass_api_key = c[ConfigReader.NASS_API_KEY] 98 | 99 | @staticmethod 100 | def read_yaml(config_file): 101 | """Read the YAML config file to a dictionary. 102 | 103 | :param config_file: Full path with file name and extension to the input config file. 104 | 105 | :return: YAML dictionary-like object 106 | 107 | """ 108 | 109 | with open(config_file) as f: 110 | return yaml.safe_load(f) 111 | -------------------------------------------------------------------------------- /janus/preprocessing/get_gis_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Mon Apr 8 21:55:00 2019 3 | 4 | @author: kek25 5 | 6 | Select GIS data based on base year, and resolution and clip to extent to create the initial land cover coverage 7 | """ 8 | 9 | import os 10 | import geopandas as gp 11 | import janus.preprocessing.landcover_preprocessing as lc 12 | 13 | 14 | def get_gis_data(counties_shp, categories_csv, county_list, scale, year, raw_lc_dir, processed_lc_dir, init_lc_dir, 15 | gcam_category_type='local_GCAM_id'): 16 | """Pre-process GIS data based on counties, base year, and resolution. 17 | 18 | :param counties_shp: Full path with file name and extension to the input counties shapefile. 
19 | :type counties_shp: str 20 | 21 | :param categories_csv: Full path with file name and extension to the input categories CSV file 22 | that bins CDL land classes to GCAM land classes 23 | :type categories_csv: str 24 | 25 | :param county_list: List of county names to process 26 | :type county_list: list 27 | 28 | :param scale: resolution of grid cells in meters 29 | :type scale: int 30 | 31 | :param year: Four digit year to process (e.g., 2000) 32 | :type year: int 33 | 34 | :param raw_lc_dir: Full path to the directory containing the raw land cover data 35 | :type raw_lc_dir: str 36 | 37 | :param processed_lc_dir: Full path to the directory containing the processed land cover data 38 | :type processed_lc_dir: str 39 | 40 | :param init_lc_dir: Full path to the directory where land cover initialization files are stored 41 | :type init_lc_dir: str 42 | 43 | :param gcam_category_type: Convert CDL data to GCAM categories of choice, Default 'local_GCAM_id' which 44 | is a set of ids that are specific to a local set of crop categories; where, 45 | 'GCAM_id_list' is the standard set of GCAM global categories. 46 | :type gcam_category_type: str 47 | 48 | :return: [0] Tiff; Land cover in GCAM categories at scale of input data 49 | [1] Tiff; Land cover in GCAM categories at user defined scale of interest 50 | [2] ESRI Shapefile; Extent of domain 51 | [3] ESRI Shapefile; grid of polygons for domain 52 | [4] Tiff; GCAM land cover for initiation year clipped to user defined extent 53 | 54 | 55 | """ 56 | # read counties shapefile as geopandas data frame 57 | gdf_counties = gp.read_file(counties_shp) 58 | gdf_counties.set_index('county', inplace=True) 59 | 60 | # convert cdl data to GCAM categories of choice 61 | lc.c2g(categories_csv, processed_lc_dir, raw_lc_dir, gcam_category_type) 62 | 63 | assert os.path.exists(os.path.join(processed_lc_dir, 'gcam_'+str(int(year))+'_srb.tiff')), \ 64 | 'get_gis_data.py ERROR: CDL to GCAM conversion was not successful, output does not exist' 65 | 66 | # convert GCAM file to scale of interest 67 | lc.agg_gcam(scale, processed_lc_dir, year) 68 | 69 | assert os.path.exists( 70 | os.path.join(processed_lc_dir, 'gcam_' + str(int(scale)) + '_domain_' + str(int(year)) + '.tiff')), \ 71 | 'get_gis_data.py ERROR: aggregation was not successful, output does not exist' 72 | 73 | # use the above file to create a polygon coverage & save; this allows for mapping each cell over time (?) 
74 | lc.grid2poly(year, scale, processed_lc_dir, init_lc_dir) 75 | 76 | # use the poly grid to create the extent for the model - only needed if using other land cover data 77 | lc.get_extent(gdf_counties, county_list, scale, init_lc_dir) 78 | 79 | # crop land cover data from initialization year 80 | gcam_file = os.path.join(processed_lc_dir, 'gcam_' + str(int(scale)) + '_domain_' + str(int(year)) + '.tiff') 81 | lc.get_gcam(gdf_counties, county_list, gcam_file, init_lc_dir) 82 | 83 | assert os.path.exists(os.path.join(init_lc_dir, 'init_landcover_' + os.path.basename(gcam_file))), \ 84 | 'get_gis_data.py ERROR: clipping to user extent was not successful, output does not exist' 85 | -------------------------------------------------------------------------------- /janus/preprocessing/convert_gcam_usa_prices.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Sun Nov 22, 2019 5 | 6 | @author: Kendra Kaiser 7 | 8 | Read in GCAM-USA output, convert to categories used in this instance for profit signal generation 9 | """ 10 | 11 | import numpy as np 12 | import sys 13 | import pandas as pd 14 | from scipy.stats import linregress 15 | 16 | 17 | def gcam_usa_price_converter(gcam_profits, profits_out, key_file, nc, nt, year): 18 | """Convert GCAM USA prices to the crop categories defined in the key file and format to be used by Janus 19 | 20 | :param gcam_profits: Full path and file name to GCAM-USA outputs 21 | :type gcam_profits: String 22 | :param profits_out: Full path and output file name to CSV file to which profit time series will be written 23 | :type profits_out: String 24 | :param key_file: Full path and file name of key file 25 | :type key_file: String 26 | :param nc: Number of crops to create profit time series for 27 | :type nc: Integer 28 | :param nt: Number of time steps in the time series 29 | :type nt: Integer 30 | :param year: Start year of model run 31 | :type year: Integer 32 | 33 | :return: null (output written to file) 34 | """ 35 | 36 | # Error traps 37 | assert nc > 0, 'convert_gcam_usa_prices.py ERROR: Negative number of crops encountered' 38 | assert nt > 0, 'convert_gcam_usa_prices.py ERROR: Negative number of time steps encountered' 39 | assert nc <= 28, 'convert_gcam_usa_prices.py ERROR: Too many crops encountered' 40 | 41 | # function to find nearest value 42 | def find_nearest(array, value): 43 | array = np.asarray(array) 44 | idx = (np.abs(array - value)).argmin() 45 | return array[idx] 46 | 47 | # read input data 48 | gcam_dat = pd.read_csv(gcam_profits) 49 | key = pd.read_csv(key_file) 50 | 51 | # parse input data 52 | crop_names = gcam_dat.sector.unique() 53 | valid_crops = np.where(key['GCAM_USA_price_id'].notna()) # GCAM-USA LU categories with crop prices 54 | gcam_usa_names = key['GCAM_USA_price_id'][ 55 | valid_crops[0]] # crop categories from GCAM-USA to use for SRB crop prices 56 | srb_ids = key['local_GCAM_id_list'][valid_crops[0]] 57 | 58 | # TODO fix this assert, need to drop crops that aren't in the SRB 59 | #assert all(np.sort(gcam_usa_names.unique()) == np.sort(crop_names)), 'convert_gcam_usa_prices.py ERROR: Crop ' \ 60 | #'names from gcam_usa do not match keyfile' 61 | 62 | # find start and end years from gcam data 63 | int_col = np.where(gcam_dat.columns == str(year))[0][0] 64 | end_yr = find_nearest(gcam_dat.columns[3:-1].astype(int), (year + nt)) 65 | end_col = np.where(gcam_dat.columns == str(end_yr))[0][0] 66 | 67 | # setup output array 
68 | out = np.zeros([nt + 1, len(valid_crops[0])]) 69 | out[0, :] = np.transpose(srb_ids) 70 | 71 | yrs = np.array(gcam_dat.columns[int_col: end_col + 1].astype(int)) 72 | intval = yrs[1] - yrs[0] # interval between predicted prices 73 | prices_usa = gcam_dat[gcam_dat['region'] == 'USA'] 74 | prices = prices_usa.iloc[:, int_col:(end_col + 1)] 75 | 76 | # Create linear regressions between each timestep 77 | for c in np.arange(len(crop_names)): 78 | for y in np.arange(len(yrs)-1): 79 | yrs_ser = np.arange(yrs[y], yrs[y]+intval) 80 | x = [yrs[y], (yrs[y] + intval)] 81 | # create regression between years of GCAM data 82 | m, b, r_val, p_val, stderr = linregress(x, prices.iloc[c, y: y+2]) 83 | # predict prices for every year 84 | price_pred = m * yrs_ser + b 85 | if y == 0: 86 | price_ts = price_pred 87 | else: 88 | price_ts = np.append(price_ts, price_pred) 89 | 90 | # find corresponding SRB crop to place prices in outfile 91 | gcam_srb_idx = np.where(gcam_usa_names == crop_names[c])[0] 92 | for i in np.arange(len(gcam_srb_idx)): 93 | out[1:, gcam_srb_idx[i]] = np.transpose(price_ts) 94 | 95 | # error trap 96 | if out.shape[1] != nc: 97 | print('\nERROR: Mismatch in number of crops read and provided as input\n') 98 | print(str(nc) + ' crops were expected, ' + str(out.shape[1]) + ' were read. Check key file\n') 99 | sys.exit() 100 | 101 | # save output 102 | with open(profits_out, 'w') as fp: 103 | np.savetxt(fp, out, delimiter=',', fmt='%.5f') 104 | fp.close() 105 | -------------------------------------------------------------------------------- /janus/tests/data/CDL2GCAM_categories.csv: -------------------------------------------------------------------------------- 1 | CDL_id,CDL_name,GCAM_id,local_GCAM_id,GCAM_id_list,GCAM_name,GCAM_cat,local_GCAM_id_list,local_GCAM_Name,local_cat 2 | 1,Corn,1,1,1,Corn,ag,1,Corn,ag 3 | 237,Dbl Crop Barley/Corn,1,1,2,Wheat,ag,2,Wheat,ag 4 | 241,Dbl Crop Corn/Soybeans,1,1,3,Rice,ag,3,Dry Beans,ag 5 | 13,Pop or Orn Corn,1,1,4,Root_Tuber,ag,4,Root_Tuber,ag 6 | 12,Sweet Corn,1,1,5,OilCrop,ag,5,OilCrop,ag 7 | 230,Dbl Crop Lettuce/Durum Wht,2,2,6,SugarCrop,ag,6,SugarCrop,ag 8 | 225,Dbl Crop WinWht/Corn,2,2,7,OtherGrain,ag,7,OtherGrain,ag 9 | 238,Dbl Crop WinWht/Cotton,2,2,8,FiberCrop,ag,8,Onions,ag 10 | 26,Dbl Crop WinWht/Soybeans,2,2,9,FodderGrass,ag,9,FodderGrass,ag 11 | 22,Durum Wheat,2,2,10,FodderHerb,ag,10,FodderHerb,ag 12 | 23,Spring Wheat,2,2,11,Biomass,ag,11,Peas,ag 13 | 24,Winter Wheat,2,2,12,MiscCrop,ag,12,MiscCrop,ag 14 | 3,Rice,3,12,13,OtherArableLand,ag,13,OtherArableLand,ag 15 | 206,Carrots,4,4,14,PalmFruit,ag,14,Sod / Grass Seeds,ag 16 | 43,Potatoes,4,4,15,Pasture,ag,15,Pasture,ag 17 | 246,Radishes,4,4,16,UnmanagedPasture,ag,16,Hops,ag 18 | 46,Sweet Potatoes,4,4,17,UrbanLand,urb,17,UrbanLand - High Intensity,urb 19 | 247,Turnips,4,4,18,Willow,nat,18,Stone/Pome Fruit,ag 20 | 38,Camelina,5,5,19,Forest,nat,19,Forest,nat 21 | 31,Canola,5,5,20,UnmanagedForest,nat,20,Grapes,ag 22 | 239,Dbl Crop Soybeans/Cotton,5,5,21,Shrubland,nat,21,Shrubland,nat 23 | 32,Flaxseed,5,5,22,Grassland,nat,22,Mint,ag 24 | 35,Mustard,5,5,23,Tundra,nat,23,Wetlands,nat 25 | 211,Olives,5,5,24,RockIceDesert,nat,24,RockIceDesert,nat 26 | 10,Peanuts,5,5,,,,25,UrbanLand - Med Intensity,urb 27 | 34,Rape Seed,5,5,,,,26,UrbanLand - Low Intensity,urb 28 | 33,Safflower,5,5,,,,27,UrbanLand - Open Space,urb 29 | 5,Soybeans,5,5,,,,28,Water,water 30 | 6,Sunflower,5,5,,,,,, 31 | 41,Sugarbeets,6,6,,,,,, 32 | 45,Sugarcane,6,6,,,,,, 33 | 21,Barley,7,7,,,,,, 34 | 39,Buckwheat,7,7,,,,,, 35 | 235,Dbl 
Crop Barley/Sorghum,7,7,,,,,, 36 | 254,Dbl Crop Barley/Soybeans,7,7,,,,,, 37 | 234,Dbl Crop Durum Wht/Sorghum,7,7,,,,,, 38 | 233,Dbl Crop Lettuce/Barley,7,7,,,,,, 39 | 226,Dbl Crop Oats/Corn,7,7,,,,,, 40 | 240,Dbl Crop Soybeans/Oats,7,7,,,,,, 41 | 236,Dbl Crop WinWht/Sorghum,7,7,,,,,, 42 | 29,Millet,7,7,,,,,, 43 | 28,Oats,7,7,,,,,, 44 | 25,Other Small Grains,7,7,,,,,, 45 | 27,Rye,7,7,,,,,, 46 | 4,Sorghum,7,7,,,,,, 47 | 30,Speltz,7,7,,,,,, 48 | 205,Triticale,7,7,,,,,, 49 | 2,Cotton,8,12,,,,,, 50 | 36,Alfalfa,10,10,,,,,, 51 | 57,Herbs,10,10,,,,,, 52 | 224,Vetch,10,10,,,,,, 53 | 60,Switchgrass,11,12,,,,,, 54 | 51,Chick Peas,12,3,,,,,, 55 | 42,Dry Beans,12,3,,,,,, 56 | 49,Onions,12,8,,,,,, 57 | 53,Peas,12,11,,,,,, 58 | 75,Almonds,12,12,,,,,, 59 | 207,Asparagus,12,12,,,,,, 60 | 242,Blueberries,12,12,,,,,, 61 | 214,Broccoli,12,12,,,,,, 62 | 243,Cabbage,12,12,,,,,, 63 | 55,Caneberries,12,12,,,,,, 64 | 209,Cantaloupes,12,12,,,,,, 65 | 244,Cauliflower,12,12,,,,,, 66 | 245,Celery,12,12,,,,,, 67 | 70,Christmas Trees,12,12,,,,,, 68 | 72,Citrus,12,12,,,,,, 69 | 58,Clover/Wildflowers,12,12,,,,,, 70 | 250,Cranberries,12,12,,,,,, 71 | 50,Cucumbers,12,12,,,,,, 72 | 231,Dbl Crop Lettuce/Cantaloupe,12,12,,,,,, 73 | 232,Dbl Crop Lettuce/Cotton,12,12,,,,,, 74 | 248,Eggplants,12,12,,,,,, 75 | 208,Garlic,12,12,,,,,, 76 | 249,Gourds,12,12,,,,,, 77 | 219,Greens,12,12,,,,,, 78 | 213,Honeydew Melons,12,12,,,,,, 79 | 52,Lentils,12,12,,,,,, 80 | 227,Lettuce,12,12,,,,,, 81 | 47,Misc Vegs & Fruits,12,12,,,,,, 82 | 212,Oranges,12,12,,,,,, 83 | 44,Other Crops,12,12,,,,,, 84 | 71,Other Tree Crops,12,12,,,,,, 85 | 74,Pecans,12,12,,,,,, 86 | 216,Peppers,12,12,,,,,, 87 | 204,Pistachios,12,12,,,,,, 88 | 217,Pomegranates,12,12,,,,,, 89 | 229,Pumpkins,12,12,,,,,, 90 | 222,Squash,12,12,,,,,, 91 | 221,Strawberries,12,12,,,,,, 92 | 11,Tobacco,12,12,,,,,, 93 | 54,Tomatoes,12,12,,,,,, 94 | 76,Walnuts,12,12,,,,,, 95 | 48,Watermelons,12,12,,,,,, 96 | 59,Sod/Grass Seed,12,14,,,,,, 97 | 56,Hops,12,16,,,,,, 98 | 68,Apples,12,18,,,,,, 99 | 223,Apricots,12,18,,,,,, 100 | 66,Cherries,12,18,,,,,, 101 | 218,Nectarines,12,18,,,,,, 102 | 67,Peaches,12,18,,,,,, 103 | 77,Pears,12,18,,,,,, 104 | 220,Plums,12,18,,,,,, 105 | 210,Prunes,12,18,,,,,, 106 | 69,Grapes,12,20,,,,,, 107 | 14,Mint,12,22,,,,,, 108 | 61,Fallow/Idle Cropland,13,13,,,,,, 109 | 37,Other Hay/Non Alfalfa,15,9,,,,,, 110 | 176,Grassland/Pasture,15,15,,,,,, 111 | 62,Pasture/Grass,15,15,,,,,, 112 | 124,Developed/High Intensity,17,17,,,,,, 113 | 123,Developed/Med Intensity,17,25,,,,,, 114 | 82,Developed,17,26,,,,,, 115 | 122,Developed/Low Intensity,17,26,,,,,, 116 | 121,Developed/Open Space,17,27,,,,,, 117 | 141,Deciduous Forest,19,19,,,,,, 118 | 142,Evergreen Forest,19,19,,,,,, 119 | 63,Forest,19,19,,,,,, 120 | 143,Mixed Forest,19,19,,,,,, 121 | 152,Shrubland,21,21,,,,,, 122 | 64,Shrubland,21,21,,,,,, 123 | 195,Herbaceous Wetlands,23,23,,,,,, 124 | 87,Wetlands,23,23,,,,,, 125 | 190,Woody Wetlands,23,23,,,,,, 126 | 131,Barren,24,24,,,,,, 127 | 65,Barren,24,24,,,,,, 128 | 81,Clouds/No Data,24,24,,,,,, 129 | 88,Nonag/Undefined,24,24,,,,,, 130 | 112,Perennial Ice/Snow ,24,24,,,,,, 131 | 92,Aquaculture,24,28,,,,,, 132 | 111,Open Water,24,28,,,,,, 133 | 83,Water,24,28,,,,,, -------------------------------------------------------------------------------- /janus/initialize_agents_domain.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Mon Aug 12 11:15:12 2019 3 | 4 | @author: kek25 5 | """ 6 | import numpy as np 7 | 8 | import 
janus.agents.farmer as farmer 9 | import janus.agents.d_cell as cell 10 | import janus.agents.urban as urban 11 | import janus.preprocessing.get_nass_agent_data as getNASS 12 | 13 | 14 | def initialize_domain(ny, nx): 15 | """ Create empty domain array 16 | 17 | :param ny: Number of columns in domain 18 | :type ny: Int 19 | 20 | :param nx: Number of rows in domain 21 | :type nx: Int 22 | 23 | :return: Empty numpy array filled with class Dcell at each pixel 24 | :type: Numpy Array 25 | """ 26 | domain = np.empty((ny, nx), dtype=object) 27 | 28 | for i in np.arange(ny): 29 | 30 | for j in np.arange(nx): 31 | 32 | domain[i][j] = cell.Dcell() 33 | 34 | return domain 35 | 36 | 37 | def place_agents(ny, nx, lc, key_file, cat_option): 38 | """ Place agents on the landscape based on land cover and associated categorization 39 | 40 | :param ny: Number of columns in domain 41 | :type ny: Int 42 | 43 | :param nx: Number of rows in domain 44 | :type ny: Int 45 | 46 | :param lc: Initial land cover numpy array 47 | :type lc: Numpy Array 48 | 49 | :param key_file: csv file with categorization from CDL categories to GCAM or user defined categories, see README file. 50 | :type key_file: CSV file 51 | 52 | :param cat_option: Set whether using 'GCAM' or 'local'categorization. If the local characterization has been changed to 53 | have more or less categories, the number of rows to use in line 52/53 will need to be edited 54 | :type cat_option: String 55 | 56 | :return: numpy array of strings with each agent type 57 | :type: Numpy Array 58 | 59 | """ 60 | agent_array = np.empty((ny, nx), dtype='U10') 61 | 62 | if cat_option == 'local': 63 | 64 | agent_cat = key_file['local_cat'][0:28] 65 | code = key_file['local_GCAM_id_list'][0:28] 66 | 67 | elif cat_option == 'GCAM': 68 | 69 | agent_cat = key_file['GCAM_cat'][0:24] 70 | code = key_file['GCAM_id_list'][0:24] 71 | 72 | ag = np.array(code[agent_cat == 'ag']).astype(int) 73 | urb = np.array(code[agent_cat == 'urb']).astype(int) 74 | water = np.array(code[agent_cat == 'water']).astype(int) 75 | empty = np.array(code[agent_cat == 'nat']).astype(int) 76 | 77 | # this works, would be better without the for loops 78 | for i in ag: 79 | agent_array[lc == i] = farmer.Farmer.__name__ 80 | 81 | for i in water: 82 | agent_array[lc == i] = 'water' 83 | 84 | for i in urb: 85 | agent_array[lc == i] = urban.Urban.__name__ 86 | 87 | for i in empty: 88 | agent_array[lc == i] = 'empty' 89 | 90 | return agent_array 91 | 92 | 93 | def agents(agent_array, domain, dist2city, tenure_cdf, age_cdf, switch, ny, nx, lc, p, attr): 94 | """Place agent structures onto landscape and define attributes. 
95 | 96 | :param agent_array: Numpy array of strings defining each agent type 97 | :type agent_array: Numpy Array 98 | 99 | :param domain: Initial domain filled with class Dcell 100 | :type domain: Numpy Array 101 | 102 | :param dist2city: Numpy array of distance to city (float) 103 | :type dist2city: Numpy Array 104 | 105 | :param tenure_cdf: CDF of tenure type in the domain 106 | :type tenure_cdf: 107 | 108 | :param age_cdf: CDF of ages in the domain 109 | :type age_cdf: 110 | 111 | :param switch: List of lists of parameter sets to describe agent switching behavior 112 | :type switch: List 113 | 114 | :param ny: Number of columns in domain 115 | :type ny: Int 116 | 117 | :param nx: Number of rows in domain 118 | :type nx: Int 119 | 120 | :param lc: Initial land cover numpy array 121 | :type lc: Numpy Array 122 | 123 | :param p: Percentage of switching averse farming agents 124 | :type p: Float 125 | 126 | :param attr: A boolean indicating whether or not to use switching curves based on tenure and age attributes 127 | :type attr: Bool 128 | 129 | :return: Domain with agents in each dCell 130 | :type: Numpy Array 131 | 132 | """ 133 | for i in np.arange(ny): 134 | 135 | for j in np.arange(nx): 136 | 137 | if agent_array[i][j] == farmer.Farmer.__name__: 138 | 139 | agent_data = getNASS.farmer_data(tenure_cdf, age_cdf, switch, dist2city[i][j], p, attr) 140 | new_agent = farmer.Farmer(Age=agent_data["AgeInit"], LandStatus=agent_data["LandStatus"], 141 | Dist2city=agent_data["Dist2city"], nFields=agent_data['nFields'], 142 | alpha=agent_data['Alpha'], 143 | beta=agent_data['Beta']) # this is passing actual agent data 144 | domain[i][j].add_agent(new_agent) 145 | 146 | if agent_array[i][j] == urban.Urban.__name__: 147 | 148 | agent_data = getNASS.urban_data(lc[i][j]) 149 | new_agent = urban.Urban(density=agent_data["Density"]) 150 | domain[i][j].add_agent(new_agent) 151 | 152 | return domain 153 | 154 | 155 | def init_profits(profit_signals, nt, ny, nx, crop_id_all, crop_ids): 156 | """Initialize np array of profits 157 | 158 | :param profit_signals: Profit signals created from generate synthetic prices, or user supplied 159 | :type profit_signals: Numpy Array 160 | 161 | :param nt: Number of time steps 162 | :type nt: Int 163 | 164 | :param ny: Number of columns in domain 165 | :type ny: Int 166 | 167 | :param nx: Number of rows in domain 168 | :type nx: Int 169 | 170 | :param crop_id_all: nt x nx x ny np array of current land cover 171 | :type crop_id_all: Numpy Array 172 | 173 | :param crop_ids: Num_crop x 1 np array of crop ids 174 | :type crop_ids: Numpy Array 175 | 176 | :return: Initial profits based on price signals 177 | :type: Numpy Array 178 | 179 | """ 180 | 181 | profits_actual = np.zeros((nt, ny, nx)) 182 | 183 | for i in np.arange(ny): 184 | 185 | for j in np.arange(nx): 186 | 187 | crop_ind = crop_id_all[0, i, j] 188 | crop_ix = np.where(crop_ids == crop_ind) 189 | 190 | if crop_ind in crop_ids: 191 | profits_actual[0, i, j] = profit_signals[crop_ix[0][0], 0] 192 | 193 | else: 194 | profits_actual[0, i, j] = 0 195 | 196 | return profits_actual 197 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![DOI](https://zenodo.org/badge/157612222.svg)](https://zenodo.org/badge/latestdoi/157612222) 2 | [![Build Status](https://travis-ci.org/LEAF-BoiseState/janus.svg?branch=master)](https://travis-ci.org/LEAF-BoiseState/janus) 3 | 
[![codecov](https://codecov.io/gh/LEAF-BoiseState/janus/branch/master/graph/badge.svg)](https://codecov.io/gh/LEAF-BoiseState/janus) 4 | 5 | 6 | # janus 7 | 8 | `janus` was designed to simulate land cover changes over time. These land cover changes are carried out by individual agents that choose to either continue planting the same crop, or choose to switch to a new crop based on expected profits. 9 | 10 | ## Contact 11 | - Kendra Kaiser (kendrakaiser@boisestate.edu) 12 | - Lejo Flores (lejoflores@boisestate.edu) 13 | 14 | ## Getting Started 15 | The `janus` package uses only Python 3.3 and up. 16 | 17 | ### Step 1: 18 | Clone the repository into your desired directory: 19 | 20 | `git clone https://github.com/LEAF-BoiseState/janus.git` 21 | 22 | ### Step 2: 23 | You can install `janus` by running the following from your cloned directory (NOTE: ensure that you are using the desired `python` instance): 24 | 25 | `python setup.py install` 26 | 27 | ### Step 3: 28 | Confirm that the module and its dependencies have been installed by running from your prompt: 29 | 30 | ```python 31 | from janus import Janus 32 | ``` 33 | 34 | If no error is returned then you are ready to go! 35 | 36 | ### Step 4: 37 | If you choose to install the example data run the following (you must have write access to the directory you choose to store the data in): 38 | 39 | ```python 40 | from janus import InstallSupplement 41 | 42 | InstallSupplement() 43 | ``` 44 | 45 | ## Setting up a run 46 | 47 | ### Setup the `config.yml` file 48 | There is an example config file in the `janus/example` directory of this package that describes each input. To conduct a test run, install the data supplement as described above and replace the paths in the example config file with the location of where you installed the example data. See the description below to match the example data file name with what is included with the package. 49 | 50 | | Key | Description | Example Data Name 51 | | -- | -- | -- | 52 | | `f_counties_shp` | full path with file name and extension to the counties shapefile | `shp/counties_srb.shp` | 53 | | `f_key_file` | full path with file name and extension to the land class category key file | `data/CDL2GCAM_categories.csv` | 54 | | `f_gcam_file` | GCAM raster file | `data/gcam_2010_domain_3000.tiff` | 55 | | `f_profits_file` | Profits file | `data/GenerateSyntheticPrices_test_output.csv` | 56 | | `nt` | Number of time steps | | 57 | | `switch_params` | list of lists for switching averse, tolerant parameters (alpha, beta) | | 58 | | `p` | Proportion of each switching type, lower than p is averse, higher is tolerant | | 59 | | `fmin` | The fraction of current profit at which the CDF of the beta distribution is zero | | 60 | | `fmax` | The fraction of current profit at which the CDF of the beta distribution is one | | 61 | | `n` | The number of points to generate in the CDF | | 62 | | `crop_seed_size` | Seed to set for random number generators for unit testing | | 63 | | `target_yr` | Initialization year associated with landcover input | | 64 | | `scale` | Scale of land cover grid in meters. Current options are 1000 and 3000 m | | 65 | | `county_list` | List of counties to evaluate | | 66 | | `agent_variables` | NASS variables to characterize agents with. Currently set to use "TENURE" and "AREA OPERATED" | | 67 | | `nass_year` | Year that NASS data are pulled from. 
These data are collected every 5 years, with the initial year used here being 2007 | |
68 | | `nass_county_list` | List of counties in the domain that NASS data are collected from; these must be capitalized | |
69 | | `nass_api_key` | A NASS API key is needed to access the NASS data; get yours here: https://quickstats.nass.usda.gov/api | |
70 | 71 | ### Setup the input files
72 | 73 | - `counties_shp.shp`: Shapefile of counties within the area of interest. This should have county names and a single identifier for each polygon.
74 | 75 | - `cdl.txt`: Cropland Data Layer
76 | 77 | - `key_file.csv`: This file must have the following column titles: 'CDL_id', 'CDL_name', 'GCAM_id', 'local_GCAM_id', 'GCAM_id_list', 'GCAM_name', 'GCAM_cat', 'local_GCAM_id_list', 'local_GCAM_name', 'local_cat'.
78 | 79 | 'GCAM_id' and 'local_GCAM_id' are the conversion columns where the destination id is matched with each original CDL id.
80 | 81 | 'CDL_id', 'CDL_name', 'GCAM_id', and 'GCAM_name' are set based on the original CDL data and GCAM categorization.
82 | 83 | The 'id_list' columns are numeric identifiers for each output category, matching the output category names ('GCAM_name', 'local_GCAM_name').
84 | 85 | Columns that start with 'local' are where the file can be modified to create location-specific land cover categories.
86 | 87 | 'cat' is the generic category used for assigning agents: 'ag' for agricultural, 'nat' for natural (e.g. water, wetland), or 'urb' for urban land covers.
88 | 89 | - `profits_file.csv`: A csv with one row per crop, containing the crop name, crop ID number, the chosen price function, and the parameters for that function.
90 | 91 | ### Run Preprocessing Packages
92 | Run the preprocessing scripts to set up the initial land cover data and profit data.
93 | 94 | Janus is currently set up to use the NASS Cropland Data Layer; this data should be downloaded for the area of interest, and the key_file should be updated to reflect the land cover categories of interest. If other land cover data is being used, this step is not necessary. The aggregation step may take upwards of an hour depending on the extent.
95 | ```python 96 | from janus.preprocessing.get_gis_data import get_gis_data 97 | get_gis_data('', '', '', , , '', '', '', 98 | gcam_category_type='local_GCAM_id') 99 | ```
100 | Janus can either convert profit data from GCAM-USA or generate synthetic profit signals. Again, the key_file will need to be modified to convert GCAM categories to local land cover categories of interest.
101 | 102 | ```python 103 | from janus.preprocessing.convert_gcam_usa_prices import gcam_usa_price_converter 104 | gcam_usa_price_converter('', '', , , ) 105 | ```
106 | 107 | ## Running `janus`
108 | 109 | ### Running from terminal or command line
110 | Ensure that you are using the desired `python` instance, then run:
111 | 112 | `python /model.py --config_file ` 113 |
114 | All parameters can be passed to the `Janus` class from the terminal or command line instead of by a configuration file if you so desire. Simply exclude the `config_file` argument from the required parameters.
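For example (every path and value below is a placeholder rather than a working configuration), a fully command-line run might look like `python <path to cloned repo>/janus/model.py --nt 30 --scale 3000 --state ID --nass_year 2007 --nass_api_key <your key>`, with the remaining required parameters supplied the same way.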
Run the following for assistance: 115 | 116 | `python /model.py --help` 117 | 118 | ### Running from a Python Prompt or from another script 119 | 120 | ```python 121 | from janus import Janus 122 | Janus('') 123 | ``` 124 | 125 | ## Outputs 126 | 127 | - `landcover.npy`: Numpy array of landcover through time [Nt, Ny, Nx] 128 | - `domain.npy`: Numpy array of class type dcell that contain information about agents [Nt, Ny, Nx] 129 | - `profits.npy`: Numpy array of profits through time [Nt, Ny, Nx] 130 | 131 | 132 | ## Community involvement 133 | `janus` was built to be extensible. It is our hope that the community will continue the development of this software. Please submit a pull request for any work that you would like have considered as a core part of this package. You will be properly credited for your work and it will be distributed under our current open-source license. Any issues should be submitted through standard GitHub issue protocol and we will deal with these promptly. 134 | -------------------------------------------------------------------------------- /janus/preprocessing/get_nass_agent_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Mon Apr 8 22:15:11 2019 3 | 4 | @author: kek25 5 | 6 | Functions to create pdfs from NASS Data, and assign agent attributes to dictionaries 7 | """ 8 | import nass 9 | import pandas as pd 10 | import numpy as np 11 | 12 | 13 | def cleanup(value): 14 | """Massage data into proper form. 15 | 16 | :param value: Value returned from NASS query 17 | :type value: String 18 | 19 | :return: Numeric value without commas or spaces 20 | :type: Float 21 | 22 | """ 23 | try: 24 | return int(value.replace(',', '')) 25 | 26 | except ValueError: 27 | return 0 28 | 29 | 30 | def ages(nass_yr, state, nass_api_key): 31 | """ Pulls age data from the NASS data set for a given state and year 32 | 33 | :param nass_yr: Year to pull data from, NASS data is collected every 5 years (e.g. 2007, 2012) 34 | :type nass_yr: Int 35 | :param state: State from which to pull NASS data, must be capitalized abbreviation (e.g. 'ID' for Idaho) 36 | :type state: String 37 | :param nass_api_key: Personal API key to retrieve from NASS website 38 | :type nass_api_key: String 39 | 40 | :return: number of farmers in each age category 41 | :type: Numpy Array 42 | 43 | """ 44 | api = nass.NassApi(nass_api_key) 45 | q = api.query() 46 | 47 | # prepare lists for data 48 | age_cat = ["AGE LT 25", "AGE 25 TO 34", "AGE 35 TO 44", "AGE 45 TO 54", "AGE 55 TO 64", "AGE 65 TO 74", "AGE GE 75"] 49 | q.filter('commodity_desc', 'OPERATORS').filter('state_alpha', state).filter('year', nass_yr).filter('class_desc', age_cat) 50 | age_df = pd.DataFrame(q.execute()) 51 | age_df['Value'] = age_df['Value'].apply(cleanup) 52 | 53 | ages = pd.DataFrame(0, index=np.arange(len(age_df)), columns=('category', 'operators')) 54 | ages['category'] = age_cat.copy() 55 | 56 | for i in range(len(age_df)): 57 | # state level aggregation 58 | vals = age_df[(age_df['class_desc'] == ages.loc[i, 'category'])] 59 | ages.loc[i, 'operators'] = int(vals['Value']) 60 | 61 | return ages 62 | 63 | 64 | def tenure_area(state, county_list, nass_yr, variables, nass_api_key): 65 | """Aggregation of county level tenure status and associated area from domain of interest" 66 | 67 | :param state: State from which to pull NASS data, must be capitalized abbreviation (e.g. 
'ID' for Idaho) 68 | :type state: String 69 | :param county_list: List of county names to pull NASS data, must be all capitalized 70 | :type county_list: List of strings 71 | :param nass_yr: Year to pull data from, NASS data is collected every 5 years (e.g. 2007, 2012) 72 | :type nass_yr: Int 73 | :param variables: List of variables of interest 74 | :type variables: List of strings 75 | :param nass_api_key: Personal API key to retrieve from NASS website 76 | :type nass_api_key: String 77 | 78 | :return: Categories of tenure status with number of agents in each status, and number 79 | of operations within each area category 80 | :type: Numpy Array 81 | """ 82 | api = nass.NassApi(nass_api_key) 83 | q = api.query() 84 | 85 | q.filter('commodity_desc', 'FARM OPERATIONS').filter('state_alpha', state).filter('year', nass_yr).filter('domain_desc', variables).filter('county_name', county_list) 86 | data = q.execute() 87 | tenure_df = pd.DataFrame(data) 88 | tenure_df['Value'] = tenure_df['Value'].apply(cleanup) 89 | 90 | # prepare lists for data 91 | area_cat = ["AREA OPERATED: (1.0 TO 9.9 ACRES)","AREA OPERATED: (10.0 TO 49.9 ACRES)", "AREA OPERATED: (50.0 TO 69.9 ACRES)", "AREA OPERATED: (70.0 TO 99.9 ACRES)", "AREA OPERATED: (100 TO 139 ACRES)","AREA OPERATED: (140 TO 179 ACRES)", "AREA OPERATED: (180 TO 219 ACRES)", "AREA OPERATED: (220 TO 259 ACRES)", "AREA OPERATED: (260 TO 499 ACRES)", "AREA OPERATED: (500 TO 999 ACRES)", "AREA OPERATED: (1,000 TO 1,999 ACRES)", "AREA OPERATED: (2,000 OR MORE ACRES)"]#, "AREA OPERATED: (50 TO 179 ACRES)", "AREA OPERATED: (180 TO 499 ACRES)", "AREA OPERATED: (1,000 OR MORE ACRES)"] 92 | tenure_cat = ["TENURE: (FULL OWNER)", "TENURE: (PART OWNER)", "TENURE: (TENANT)" ] 93 | cat = tenure_cat + area_cat 94 | 95 | farms = pd.DataFrame(0, index=np.arange(len(cat)), columns=('category', 'acres', 'operations')) 96 | farms['category'] = cat 97 | 98 | for i in range(len(cat)): 99 | sub = tenure_df[(tenure_df['domaincat_desc'] == farms.loc[i,'category']) & (tenure_df['unit_desc'] == 'ACRES')] 100 | farms.loc[i, 'acres'] = sum(sub['Value']) # acres 101 | sub2 = tenure_df[(tenure_df['domaincat_desc'] == farms['category'][i]) & (tenure_df['unit_desc'] == 'OPERATIONS')] 102 | 103 | # operations 104 | farms.loc[i, 'operations'] = sum(sub2['Value']) 105 | 106 | return farms 107 | 108 | 109 | def make_age_cdf(var_array): 110 | """Create cdf distribution from NASS age data. 
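A synthetic population of operator ages is built by sampling uniformly within each NASS age bin; the cumulative histogram of that sample (one-year bins) gives the CDF.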
111 | 112 | :param var_array: Data returned from ages function 113 | :type var_array: Numpy Array 114 | 115 | :return: Age and percent likelihood of being in that category 116 | :type: Numpy Array 117 | """ 118 | 119 | ser_full = np.zeros(0) 120 | 121 | var_array['low'] = [18, 25, 35, 45, 55, 65, 75] 122 | var_array['high'] = [25, 35, 45, 55, 65, 75, 86] 123 | 124 | # create a full series of ages based on number in each category 125 | for i in np.arange(7): 126 | ser = np.random.randint(var_array.low[i], high=var_array.high[i], size=var_array.operators[i]) 127 | ser_full = np.append(ser_full, ser) 128 | 129 | h, x1 = np.histogram(ser_full, bins=68, density=True) 130 | 131 | x2 = np.floor(x1) 132 | dx = x2[2] - x2[1] 133 | f1 = np.cumsum(h) * dx 134 | perc = np.column_stack((x2[1:], f1)) 135 | 136 | return perc 137 | 138 | 139 | def make_tenure_cdf(var_array): 140 | """ 141 | 142 | :param var_array: Data returned from tenure_area function 143 | :type var_array: Numpy array 144 | 145 | :return: Each tenure status and percent likelihood of being in that category 146 | :type: Numpy array 147 | 148 | """ 149 | 150 | ser_full = np.zeros(0) 151 | 152 | ser0 = np.zeros(var_array['operations'][0]) 153 | ser1 = np.ones(var_array['operations'][1]) 154 | ser2 = np.ones(var_array['operations'][2]) + 1 155 | ser_full = np.append(ser_full, ser0) 156 | ser_full = np.append(ser_full, ser1) 157 | ser_full = np.append(ser_full, ser2) 158 | 159 | h, x1 = np.histogram(ser_full, bins=3, density=True) 160 | dx = x1[2] - x1[1] 161 | f1 = np.cumsum(h) * dx 162 | perc = np.column_stack(([0, 1, 2], f1)) 163 | 164 | return perc 165 | 166 | 167 | def farmer_data(tenure_cdf, age_cdf, switch, d2c, attr, p): 168 | """Collect agent data from NASS distributions and place in dictionary. 169 | 170 | :param tenure_cdf: Data from make_tenure_cdf function. 
Full owner, Part Owner, Tenant 171 | :type tenure_cdf: Numpy Array 172 | :param age_cdf: Data from make_age_cdf function 173 | :type age_cdf: Numpy Array 174 | :param switch: List of lists of alpha beta parameters describing likelihood of switching crops 175 | :type switch: List of lists 176 | :param p: Percentage of farming agents that are switching averse 177 | :type p: Float 178 | :param d2c: Distance to city 179 | :type d2c: Numpy Array 180 | :param attr: Indicates whether or not to use switching curves based on tenure and age attributes 181 | :type attr: Boolean 182 | 183 | :return: Farmer data based on NASS data 184 | :type: Dictionary 185 | """ 186 | ts = np.random.random_sample() 187 | ageS = np.random.random_sample() 188 | 189 | if ageS < age_cdf[0, 1]: 190 | ageI = 18 191 | else: 192 | ageT = np.where(age_cdf[:, [1]] <= ageS) 193 | ageI = max(ageT[0]) 194 | 195 | tt = np.where(tenure_cdf[:, [1]] >= ts) 196 | ten_stat = min(tt[0]) 197 | 198 | if attr: 199 | if ten_stat == tenure_cdf[0, [0]]: # full owner, switching averse 200 | k = 0 201 | elif ten_stat == tenure_cdf[1, [0]]: # part owner, switching neutral 202 | k = 2 203 | elif ten_stat == tenure_cdf[2, [0]]: # tenant, switching tolerant 204 | k = 1 205 | a_alpha = switch[k][0] + (ageI-18)*0.005 # initiate switching as a function of age 206 | a_beta = switch[k][1] - (ageI-18)*0.0005 # initiate switching as a function of age 207 | else: 208 | ss = np.random.random_sample() 209 | if ss >= p: 210 | k = 0 # switching averse 211 | else: 212 | k = 1 # switching tolerant 213 | a_alpha = switch[k][0] 214 | a_beta = switch[k][1] 215 | 216 | agent_data = { 217 | "AgeInit": ageI, 218 | "LandStatus": ten_stat, 219 | "Alpha": a_alpha, 220 | "Beta": a_beta, 221 | "nFields": 1, 222 | "Dist2city": d2c 223 | } 224 | return agent_data 225 | 226 | 227 | def urban_data(lc): 228 | """Pull the land cover category from lc, set this so it's 0 =open space, 1=low density, 2=medium density, 3=high 229 | density, this needs to be set by user based on what their land cover classes are, e.g. density would not 230 | be a category with original GCAM categories. 
If these are changed from the local_GCAM categorization they will all 231 | be set to medium density 232 | 233 | :param lc: Land cover data 234 | :type lc: Numpy Array 235 | 236 | :return: Urban agent attributes, currently only density 237 | :type: Dictionary 238 | 239 | """ 240 | if lc == 17: 241 | d = 3 242 | elif lc == 25: 243 | d = 2 244 | elif lc == 26: 245 | d = 1 246 | elif lc == 27: 247 | d = 0 248 | else: d=2 249 | 250 | agent_data = {"Density": d} 251 | 252 | return agent_data 253 | -------------------------------------------------------------------------------- /janus/crop_functions/crop_decider.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Tue Jul 9 12:12:43 2019 3 | 4 | @author: lejoflores & kendrakaiser 5 | 6 | Suite of functions to make decisions about what crop to plant 7 | """ 8 | 9 | import numpy as np 10 | import scipy.special as sp 11 | 12 | 13 | def define_seed(seed): 14 | """ Creates seed for random selection for testing 15 | :param seed: Seed value 16 | 17 | :return: Global seed value 18 | 19 | """ 20 | global seed_val 21 | 22 | seed_val = seed 23 | 24 | return 25 | 26 | 27 | def switching_prob_curve(alpha, beta, fmin, fmax, n, profit): 28 | """ Creates probability curves that show likelihood of switching crops based on profits 29 | :param alpha: Alpha parameter for the incomplete beta distribution 30 | :type alpha: Float 31 | 32 | :param beta: Beta parameter for the incomplete beta distribution 33 | :type beta: Float 34 | 35 | :param fmin: Fraction of current profit at which the CDF of the beta distribution is zero 36 | :type fmin: Float 37 | 38 | :param fmax: Fraction of current profit at which the CDF of the beta distribution is one 39 | :type fmax: Float 40 | 41 | :param n: Number of points to generate in the CDF 42 | :type n: Int 43 | 44 | :param profit: Current profit 45 | :type profit: Float 46 | 47 | :return: [0] numpy array; n points spaced linearly between fmin * profit and fmax * profit 48 | [1] numpy array; associated points of the beta distribution as specified by alpha and beta 49 | """ 50 | x = np.linspace(0, 1.0, num=n) 51 | 52 | fx = sp.betainc(alpha, beta, x) 53 | 54 | x2 = np.linspace(fmin * profit, fmax * profit, num=n) 55 | 56 | return x2, fx 57 | 58 | 59 | def decide2switch(alpha, beta, fmin, fmax, n, profit, profit_p): 60 | """ This decides whether to retain current crop or switch to one other option 61 | 62 | :param alpha: Alpha parameter for the incomplete beta distribution 63 | :type alpha: Float 64 | :param beta: Beta parameter for the incomplete beta distribution 65 | :type beta: Float 66 | :param fmin: Fraction of current profit at which the CDF of the beta distribution is zero 67 | :type fmin: Float 68 | :param fmax: Fraction of current profit at which the CDF of the beta distribution is one 69 | :type fmax: Float 70 | :param n: Number of points to generate in the CDF 71 | :type n: Int 72 | :param profit: Current profit the farmer experiences 73 | :type profit: Float 74 | :param profit_p: Potential profit of the alternative crop being evaluated 75 | :type profit_p: Float 76 | :return: A binary flag indicating whether or not to switch crops (1 = switch, 0 = do not switch) 77 | :type: Int 78 | 79 | """ 80 | if profit_p > profit: 81 | 82 | x, fx = switching_prob_curve(alpha, beta, fmin, fmax, n, profit) 83 | 84 | prob_switch = np.interp(profit_p, x, fx) 85 | 86 | if (np.random.rand(1) < prob_switch): # need to send it seed in the unit test 87 | return 1 # Switch 88 | else: 89 | 
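            # a more profitable alternative exists, but the draw fell at or above the switching probability, so keep the current crop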
return 0 # Do not switch 90 | 91 | else: 92 | return 0 # Do not switch if not profitable 93 | 94 | 95 | def assess_profit(crop, profits_current, profit_signals, num_crops, crop_ids): 96 | """Get the potential profits from the next time step and set the last profit equal to the current profit 97 | 98 | :param crop: Current crop choice 99 | :type crop: Int 100 | 101 | :param profits_current: Profit from current crop choice 102 | :type profits_current: Float 103 | 104 | :param profit_signals: A vector of profits against which current profit will be assessed 105 | :type profit_signals: Vector 106 | 107 | :param num_crops: The number of crops in the vector of Profit_signals 108 | :type num_crops: Int 109 | 110 | :param crop_ids: The associated vector of crop IDs associated with the input profit signal 111 | :param crop_ids: Vector 112 | 113 | :return: [0] Float; profit for a particular crop (Crop) from the last time step 114 | [1] Numpy array; potential profits for the current time step 115 | 116 | """ 117 | 118 | # Existing Crop ID 119 | cur_crop_choice_ind = crop.astype('int') 120 | 121 | # assess current and future profit of that given crop 122 | if np.isin(cur_crop_choice_ind, crop_ids): # if the current land cover is a crop 123 | profit_last = profits_current # last years profit in this location 124 | profit_expected = profit_signals.reshape(num_crops, 1) # next years anticipated profit 125 | 126 | else: 127 | profit_last = 0 128 | profit_expected = np.zeros((num_crops, 1)) 129 | 130 | return profit_last, profit_expected 131 | 132 | 133 | def profit_maximizer(alpha, beta, fmin, fmax, n, profits_current, vec_crops, vec_profit_p, rule=True): 134 | """ Decide which crop and associated profit to pick out of N options. 135 | 136 | :param alpha: Alpha parameter for the incomplete beta distribution 137 | :type alpha: Float 138 | 139 | :param beta: Beta parameter for the incomplete beta distribution 140 | :type beta: Float 141 | 142 | :param fmin: Fraction of current profit at which the CDF of the beta distribution is zero 143 | :type fmin: Int 144 | 145 | :param fmax: Fraction of current profit at which the CDF of the beta distribution is one 146 | :type fmax: Int 147 | 148 | :param n: Number of points to generate in the CDF 149 | :type n: Int 150 | 151 | :param profits_current: Current profit 152 | :type profits_current: Float 153 | 154 | :param vec_crops: A vector of potential alternative crops 155 | :type vec_crops: Number of crops x1 vector 156 | 157 | :param vec_profit_p: A vector of potential profits associated with the alternatives contained in vec_crops 158 | :type vec_profit_p: Number of crops x1 vector 159 | 160 | :param rule: A boolean indicating whether, if multiple alternative crops are viably 161 | more profitable, to choose the most profitable alternative (True), 162 | or select randomly between all viable alternatives. 163 | 164 | :return: [0] Integer; denoting crop choice and 165 | [1] Float; associated profit 166 | 167 | """ 168 | # Key assumptions: the vector of crop IDs and anticipated profits associated 169 | # with each crop must both be N x 1 column vectors. 
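    # (Janus.initialize_crops builds crop_ids this way, via crop_ids_load.reshape(num_crops, 1).)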
170 | assert (vec_crops.shape == vec_profit_p.shape), \ 171 | 'Supplied vector of crop IDs and potential profits must be identical' 172 | assert (vec_crops.shape[1] == 1), \ 173 | 'Supplied vector of crop IDs and potential profits must be N x 1' 174 | 175 | # Create a boolean vector to store a 0 or 1 if the farmer will select the 176 | # crop (==1) or not (==0) 177 | AccRej = np.zeros(vec_crops.shape, dtype='int') 178 | 179 | for i in np.arange(AccRej.size): 180 | # Determine whether or not the crop is viable 181 | AccRej[i] = decide2switch(alpha, beta, fmin, fmax, n, profits_current, 182 | vec_profit_p[i]) 183 | 184 | # Find the Crop IDs and associated profits that were returned as "viable": decide2switch came back as "yes" == 1 185 | ViableCrops = vec_crops[AccRej == 1] 186 | ViableProfits = vec_profit_p[AccRej == 1] 187 | 188 | if (ViableCrops.size == 0): 189 | return -1, -1 190 | 191 | # Find the maximum anticipated profit and the crop IDs associated with that 192 | MaxProfit = ViableProfits.max() 193 | MaxProfitCrop = ViableCrops[ViableProfits == MaxProfit] 194 | 195 | # This should be rare: if there happen to be more than one viable 196 | # crops that carry the same anticipated profit that also coincides with 197 | # the maximum anticipated profit. The choice here is to choose randomly 198 | # from among those crops that have the same maximum profit 199 | if (MaxProfitCrop.size > 1): 200 | ViableCrops = MaxProfitCrop 201 | ViableProfits = ViableProfits[ViableProfits == MaxProfit] 202 | rule = False # Switch rule to make the algorithm using the random option 203 | 204 | if (rule): # Return crop with largest profit 205 | CropChoice = MaxProfitCrop 206 | ProfitChoice = MaxProfit 207 | 208 | else: # Choose randomly from among all viable crops 209 | indChoice = np.random.choice(np.arange(ViableCrops.size), size=1) 210 | CropChoice = ViableCrops[indChoice] 211 | ProfitChoice = ViableProfits[indChoice] 212 | 213 | # Return the crop choice and associated profit 214 | return CropChoice, ProfitChoice 215 | 216 | 217 | def make_choice(crop_id_last, profit_last, crop_choice, profit_choice, seed=False): 218 | """ Compare the crop choice with associated profit, set the new crop ID if switching, add variability to the 219 | anticipated profit 220 | 221 | :param crop_id_last: The crop choice from the last time step 222 | :type crop_id_last: Int 223 | 224 | :param profit_last: The profit from the last time step associated with that crop 225 | :type profit_last: Float 226 | 227 | :param crop_choice: A flag indicating whether the new crop is selected 228 | :type crop_choice: Int 229 | 230 | :param profit_choice: A flag indicating whether there is a profitable alternative 231 | :type profit_choice: Int 232 | 233 | :param seed: A boolean indicating whether or not to use a random seed 234 | :type seed: Bool 235 | 236 | :return: [0] Int; selected crop ID 237 | :type: [1] Float; crop profit 238 | """ 239 | 240 | if seed: 241 | 242 | try: 243 | seed_val 244 | except NameError: 245 | print("Random seed needs to be initialized using the CropDecider.DefineSeed() Function") 246 | 247 | np.random.seed(seed_val) 248 | 249 | # Check if return values indicate the farmer shouldn't switch 250 | if (crop_choice == -1) and (profit_choice == -1): 251 | crop_id_next = crop_id_last 252 | profit_act = profit_last + np.random.normal(loc=0.0, scale=1000.0, size=(1, 1, 1)) # this years actual profit 253 | 254 | else: # switch to the new crop and add variability to resulting profit 255 | crop_id_next = crop_choice 256 | 
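        # realized profit is the anticipated profit of the new crop plus zero-mean Gaussian noise (scale=1000)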
profit_act = profit_choice + np.random.normal(loc=0.0, scale=1000.0, size=(1, 1, 1)) 257 | 258 | return crop_id_next, profit_act 259 | -------------------------------------------------------------------------------- /janus/postprocessing/create_figures.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Mon Aug 12 15:35:49 2019 3 | 4 | @author: Kendra Kaiser 5 | """ 6 | 7 | import os 8 | 9 | import numpy as np 10 | import pandas as pd 11 | import matplotlib.pyplot as plt 12 | import matplotlib.animation as animation 13 | import seaborn as sns 14 | from matplotlib.colors import ListedColormap 15 | from matplotlib.patches import Patch 16 | 17 | import janus.agents.farmer as farmer 18 | import janus.crop_functions.crop_decider as crpdec 19 | from collections import Counter 20 | 21 | 22 | def create_animation(crop_id_all, nt): 23 | """ Create gif of land cover over time 24 | 25 | :param crop_id_all: numpy array of land cover over time 26 | :param nt: number of time steps 27 | 28 | :return: Animation of crops over time 29 | 30 | """ 31 | ims = [] 32 | 33 | fig = plt.figure(figsize=(12, 12)) 34 | 35 | for t in np.arange(nt): 36 | im = plt.imshow(crop_id_all[t, :, :], interpolation='none') 37 | ims.append([im]) 38 | 39 | ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat_delay=1000) 40 | 41 | ani.save('CropID_vs_Time.gif') 42 | 43 | 44 | def plot_crop_percent(crop_id_all, CropIDs, nt, nc, scale, results_path, key_file, ag_cats): 45 | """Stack plot of crops over time 46 | 47 | :param crop_id_all: numpy array of gridded land cover over time 48 | :param CropIDs: numpy array of the crop identification numbers 49 | :param nt: number of time steps 50 | :param nc: number of crops 51 | :param scale: scale of cells within domain 52 | :param results_path: path to local results folder 53 | :param key_file: key file that has conversions from CDL to GCAM or local categories 54 | :param ag_cats: categories that are agricultural 55 | 56 | :return: image saved to results folder of the percentage of each crop over time 57 | 58 | """ 59 | ag_area = np.empty(shape=(nc, nt)) 60 | for t in np.arange(nt): 61 | cur_crop = crop_id_all[t, :, :] 62 | for c in np.arange(nc): 63 | bools = (cur_crop == CropIDs[c]) 64 | ag_area[c, t] = np.sum(bools) 65 | 66 | agTot = np.sum(ag_area, axis=0) 67 | percentages = np.zeros((nc, nt)) 68 | for c in np.arange(nc): 69 | for t in np.arange(nt): 70 | CropIx = CropIDs[c] 71 | percentages[c, t] = np.sum((crop_id_all[t, :, :] == CropIx)) / agTot[t] * 100.0 72 | t = np.arange(nt) 73 | 74 | plt.rcParams.update({'font.size': 16}) 75 | fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 12)) 76 | 77 | # pull any crops planted in the time series for legend 78 | active_crops = np.any(percentages, axis=1) 79 | ag = np.transpose(np.array(ag_cats)) 80 | ac = np.array(ag[active_crops]).flatten() 81 | 82 | clrs = ["powder blue", "windows blue", "royal blue", "sand", "grey blue", "greyish", "amber", "light gold", 83 | "faded green", "washed out green", "pea soup", "rose", "light grey", "dark teal", "jungle green", 84 | "dusty purple", "black", "bright purple", "green", "crimson", "eggshell","red orange", "burple", 85 | "battleship grey","black", 'black'] 86 | cc = dict(enumerate(clrs)) 87 | cl = [cc[x] for x in ac] 88 | col = sns.xkcd_palette(cl) 89 | 90 | ax.stackplot(t, percentages[active_crops, :], baseline='wiggle', labels=key_file['local_GCAM_Name'][ac], colors=col) 91 | 92 | ax.set_xlim([0, nt - 1]) 93 | # 
ax.set_ylim([0, 90]) 94 | ax.grid() 95 | ax.legend(loc='upper right') 96 | 97 | ax.set_ylabel('Percent Crop Choice') 98 | ax.set_xlabel('Time [yr]') 99 | 100 | output_figure = os.path.join(results_path, 'CropPercentages_{}m_{}yr.pdf'.format(scale, nt)) 101 | 102 | plt.savefig(output_figure, dpi=300, facecolor='w', edgecolor='w', bbox_inches='tight') 103 | plt.close() 104 | 105 | 106 | def plot_agent_ages(domain, AgentArray, Ny, Nx, nt, scale, results_path): 107 | """Histogram of agent ages at end of model run 108 | 109 | :param domain: Domain with agent data 110 | :param AgentArray: Numpy array with identifiers of which agent is in each cell 111 | :param Ny: Number of rows 112 | :param Nx: Number of columns 113 | :param nt: Number of time steps 114 | :param scale: Scale of cells within domain 115 | :param results_path: Path to local results folder 116 | 117 | :return: Image saved to results folder of a histogram of farmer ages at the end of the model run 118 | 119 | """ 120 | 121 | FarmerAges = [] 122 | for i in np.arange(Ny): 123 | 124 | for j in np.arange(Nx): 125 | 126 | if AgentArray[i, j] == farmer.Farmer.__name__: 127 | FarmerAges = np.append(FarmerAges, domain[i, j].FarmerAgents[0].Age) 128 | 129 | plt.rcParams.update({'font.size': 16}) 130 | plt.hist(FarmerAges) 131 | 132 | output_figure = os.path.join(results_path, 'AgentAges_{}m_{}yr.png'.format(scale, nt)) 133 | plt.savefig(output_figure, dpi=300, facecolor='w', edgecolor='w', bbox_inches='tight') 134 | plt.close() 135 | 136 | 137 | def plot_switching_curves(domain, AgentArray, fmin, fmax, Ny, Nx, nt, n, scale, results_path, profits, switch_params): 138 | """Histogram of agent ages at end of model run 139 | 140 | :param domain: Domain with agent data 141 | :param AgentArray: Numpy array with identifiers of which agent is in each cell 142 | :param fmin: The fraction of current profit at which the CDF of the beta distribution is zero 143 | :param fmax: The fraction of current profit at which the CDF of the beta distribution is one 144 | :param Ny: Number of rows 145 | :param Nx: Number of columns 146 | :param nt: Number of time steps 147 | :param n: The number of points to generate in the CDF 148 | :param scale: Scale of cells within domain 149 | :param profits: Numpy array of profits from the last time step 150 | :param results_path: path to local results folder 151 | 152 | :return: Image saved to results folder of a histogram of farmer ages at the end of the model run 153 | 154 | """ 155 | 156 | alpha_params = [] 157 | beta_params = [] 158 | profit_act = [] 159 | 160 | for i in np.arange(Ny): 161 | for j in np.arange(Nx): 162 | if AgentArray[i, j] == farmer.Farmer.__name__: 163 | alpha_params = np.append(alpha_params, domain[i, j].FarmerAgents[0].alpha) 164 | beta_params = np.append(beta_params, domain[i, j].FarmerAgents[0].beta) 165 | profit_act = np.append(profit_act, profits[i, j]) 166 | 167 | col = [0] * len(alpha_params) 168 | for i in np.arange(len(alpha_params)): 169 | if alpha_params[i] >= switch_params[0][0]: 170 | col[i] = 'k' 171 | else: 172 | col[i] = 'b' 173 | 174 | out = [0] * len(alpha_params) 175 | for i in np.arange(len(alpha_params)): 176 | out[i] = crpdec.switching_prob_curve(alpha_params[i], beta_params[i], fmin, fmax, n, 1000) # profit_act[i] 177 | 178 | # TODO: Why is the x scale so large? 
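    # note: switching_prob_curve is evaluated with a fixed profit of 1000 here, so the x axis spans fmin*1000 to fmax*1000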
179 | plt.rcParams.update({'font.size': 16}) 180 | # TODO: make these a multi-plot 181 | ax = plt.axes() 182 | for i in np.arange(len(out)): 183 | ax.plot(out[i][0], out[i][1], color=col[i]) 184 | 185 | ax.set_ylabel('Probability of switching') 186 | ax.set_xlabel('Profit') 187 | # plt.hist(alpha_params) 188 | # plt.text(250, 4, Counter(col).keys()[0]':'Counter(col).values()) 189 | # plt.set_ylabel('Alpha') 190 | # plt.set_xlabel('Count') 191 | 192 | output_figure = os.path.join(results_path, 'Switching_curves_{}m_{}yr.png'.format(scale, nt)) 193 | plt.savefig(output_figure, dpi=300, facecolor='w', edgecolor='w', bbox_inches='tight') 194 | plt.close() 195 | 196 | 197 | def plot_price_signals(price_file, key, year, nt, results_path, profits_type): 198 | labs = key['local_GCAM_Name'][key['GCAM_price_id'].notna()] 199 | ts = np.arange(year, year + nt) 200 | 201 | ax = plt.axes() 202 | for i in np.arange(len(price_file)): 203 | ax.plot(ts, price_file[i, :]) # , color=col[i]) 204 | # TODO: add legend in 205 | # ax.legend(loc='lower right') 206 | ax.set_ylabel('Crop Price $ per km2') 207 | ax.set_xlabel('Time [yr]') 208 | 209 | output_figure = os.path.join(results_path, '{}_price_signals.pdf'.format(profits_type)) 210 | plt.savefig(output_figure, dpi=300, facecolor='w', edgecolor='w', bbox_inches='tight') 211 | plt.close() 212 | 213 | 214 | def plot_lc(crop_id_all, t, year, results_path, ag_cats, CropIDs, nc, nt, key_file): 215 | """ Create spatial plot of land cover at a certain time 216 | 217 | :param crop_id_all: numpy array of land cover over time 218 | :param t: time step to plot 219 | 220 | :return: Spatial plot of land cover 221 | 222 | """ 223 | percentages = np.zeros((nc, nt)) 224 | #print(CropIDs) 225 | key_file['local_GCAM_id_list'] 226 | for c in np.arange(nc): 227 | for j in np.arange(nt): 228 | CropIx = CropIDs[c] 229 | percentages[c, j] = np.sum((crop_id_all[j, :, :] == CropIx)) 230 | # pull any crops planted in the time series for plotting 231 | active_crops = np.any(percentages, axis=1) 232 | active_crops.astype(np.int) 233 | sub = CropIDs[active_crops] 234 | #print(sub) 235 | ac = np.array(sub).flatten() 236 | #print(ac) 237 | 238 | #ag = np.transpose(np.array(ag_cats)) 239 | #ac = np.array(ag[active_crops]).flatten() 240 | #xclrs = ["white", "powder blue", "windows blue", "royal blue", "sand", "grey blue", "greyish", "amber", "light gold", 241 | # "faded green", "washed out green", "pea soup", "rose", "light grey", "dark teal", "jungle green", 242 | # "dusty purple", "black", "bright purple", "jungle green", "crimson", "egg shell","red orange"] 243 | clrs = ["white", "powder blue", "windows blue", "royal blue", "sand", "grey blue", "greyish", "amber", "light gold", 244 | "faded green", "washed out green", "pea soup", "rose", "light grey", "dark teal", "jungle green", 245 | "dusty purple", "black", "bright purple", "jungle green", "crimson", "egg shell","red orange"] 246 | cc = dict(enumerate(clrs)) 247 | #print(cc) 248 | cl = [cc[x] for x in ac] 249 | #print(cl) 250 | 251 | legend_labels = {"Corn":"xkcd:powder blue", "Wheat":"xkcd:windows blue", "Dry Beans": "xkcd:royal blue", "Root/Tuber": "xkcd:sand", "Oil Crop":"xkcd:grey blue", "Sugar Crop": "xkcd:greyish", "Other Grain":"xkcd:amber", "Onions":"xkcd:light gold", "Fodder Grass": "xkcd:faded green","FodderHerb": "xkcd:washed out green", "Peas":"xkcd:pea soup", "Misc crop": "xkcd:rose", "Other":"xkcd:light grey", "Sod":"xkcd:dark teal", "Pasture":"xkcd:jungle green", "Hops":"xkcd:dusty purple", "Stone/Pomme 
Fruit":"xkcd:bright purple", "Urban":"xkcd:black","Grapes":"xkcd:crimson", "Mint":"xkcd:red orange"} 252 | 253 | xcol = sns.xkcd_palette(cl) 254 | col = ListedColormap(xcol.as_hex()) 255 | crops = crop_id_all[t, :, :] 256 | crops = crops.astype('float') 257 | crops[crops == 0] = 'nan' 258 | 259 | plt.rcParams.update({'font.size': 16}) 260 | fig, ax = plt.subplots(figsize=(14, 12)) 261 | ax.imshow(crops, interpolation='none', cmap=col) 262 | patches = [Patch(color=color, label=label) 263 | for label, color in legend_labels.items()] 264 | 265 | ax.legend(handles=patches, 266 | bbox_to_anchor=(1.35, 1), 267 | facecolor="white") 268 | output_figure = os.path.join(results_path, 'landcover_{}.pdf'.format(year + t)) 269 | plt.savefig(output_figure, dpi=300) 270 | plt.close() 271 | -------------------------------------------------------------------------------- /janus/model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Agent Based Model of Land Use and Land Cover Change 3 | 4 | @author: Kendra Kaiser & Lejo Flores 5 | 6 | @license: BSD 2-Clause 7 | """ 8 | 9 | import argparse 10 | from datetime import datetime 11 | import os 12 | import subprocess 13 | 14 | import netCDF4 as netcdf 15 | import numpy as np 16 | import gdal, osr 17 | 18 | import janus.preprocessing.geofxns as gf 19 | import janus.crop_functions.crop_decider as crpdec 20 | import janus.initialize_agents_domain as init_agent 21 | import janus.postprocessing.create_figures as ppf 22 | import janus.preprocessing.get_nass_agent_data as get_nass 23 | 24 | from janus.config_reader import ConfigReader 25 | 26 | try: 27 | import pkg_resources 28 | except ImportError: 29 | pass 30 | 31 | 32 | class Janus: 33 | 34 | def __init__(self, config_file=None, args=None, save_result=True, plot_results=True): 35 | 36 | if (args is not None) and (config_file is None): 37 | 38 | # if config file used, read it in; else, use args from user 39 | try: 40 | self.c = ConfigReader(args.config_file) 41 | 42 | except AttributeError: 43 | self.c = args 44 | 45 | except TypeError: 46 | raise TypeError("Must pass either a configuration file or required parameters.") 47 | 48 | elif (args is None) and (config_file is None): 49 | 50 | raise RuntimeError("Must pass either a configuration file or required parameters.") 51 | 52 | else: 53 | 54 | self.c = ConfigReader(config_file) 55 | 56 | # initialize landscape and domain 57 | self.lc, self.dist2city, self.domain, self.Ny, self.Nx = self.initialize_landscape_domain() 58 | 59 | # initialize crops 60 | self.crop_ids, self.crop_id_all, self.ag, self.num_crops = self.initialize_crops() 61 | 62 | # initialize profits 63 | self.profits_actual, self.profit_signals = self.initialize_profit() 64 | 65 | # initialize agents 66 | self.agent_domain, self.agent_array = self.initialize_agents() 67 | 68 | # make agent decisions 69 | self.decisions() 70 | 71 | # plot results 72 | if plot_results: 73 | self.plot_results() 74 | 75 | # save outputs 76 | if save_result: 77 | self.save_outputs() 78 | 79 | def initialize_landscape_domain(self): 80 | """Initialize landscape and domain. 
81 | 82 | :return: lc, numpy array of land cover categories within domain at scale of interest 83 | :return: dist2city, numpy array of distance to nearest city cell 84 | :return: domain, grid of dCell classes 85 | :return: ny, number of rows in domain 86 | :return: nx, number of columns in domain 87 | """ 88 | 89 | # import the initial land cover data 90 | lc_raster = gdal.Open(self.c.f_init_lc_file) 91 | lc = lc_raster.GetRasterBand(1).ReadAsArray() 92 | 93 | ny, nx = lc.shape 94 | 95 | # initialize minimum distance to city 96 | dist2city = gf.min_dist_city(lc) 97 | 98 | domain = init_agent.initialize_domain(ny, nx) 99 | 100 | return lc, dist2city, domain, ny, nx 101 | 102 | def initialize_crops(self): 103 | """Initialize crops 104 | 105 | :return: [0] numpy array; crop IDs that are in the domain 106 | [1] numpy array; crop_id_all, land cover categories through time 107 | [2] numpy array; ag, identifies where agricultural cells exist in the domain 108 | [3] integer; num_crops, number of crops being assessed 109 | 110 | """ 111 | 112 | ag = np.where(self.c.key_file['local_cat'] == 'ag') 113 | 114 | crop_ids_load = np.int64(self.c.key_file['local_GCAM_id_list'][ag[0]]) 115 | 116 | num_crops = len(crop_ids_load) 117 | 118 | crop_ids = crop_ids_load.reshape(num_crops, 1) 119 | 120 | crop_id_all = np.zeros((self.c.Nt, self.Ny, self.Nx)) 121 | 122 | crop_id_all[0, :, :] = self.lc 123 | 124 | return crop_ids, crop_id_all, ag, num_crops 125 | 126 | def initialize_profit(self): 127 | """Initialize profits based on profit signals csv that is either generated or input from other model output 128 | 129 | :return: [0] Numpy Array; profits_actual, profit signal with a random variation 130 | [1] Numpy Array; profit_signals, transposed profit signals cleaned to be used in other functions 131 | 132 | """ 133 | if self.c.profits == 'generated': 134 | 135 | profit_signals = np.transpose(self.c.profits_file.values) 136 | 137 | assert np.all([profit_signals[:, 0], self.crop_ids[:, 0]]), 'Crop IDs in profit signals do not match ' \ 138 | 'Crop IDs from land cover' 139 | profit_signals = profit_signals[:, 1:] 140 | 141 | elif self.c.profits == 'gcam': 142 | profit_signals = np.transpose(self.c.gcam_profits_file.values) 143 | else: 144 | print("Profit type not supported") 145 | 146 | assert profit_signals.shape[1] == self.c.Nt, 'The number of time steps in the profit signals do not ' \ 147 | 'match the number of model time steps' 148 | 149 | profits_actual = init_agent.init_profits(profit_signals, self.c.Nt, self.Ny, self.Nx, self.crop_id_all, self.crop_ids) 150 | 151 | return profits_actual, profit_signals 152 | 153 | def initialize_agents(self, cat_option='local'): 154 | """Initialize agents based on NASS data and initial land cover 155 | 156 | :param cat_option: Denotes which categorization option is used, 'GCAM', 'local', or user defined 157 | :type cat_option: String 158 | 159 | :return agent domain: [0] Numpy array; agent_domain, domain with agent cell classes filled with agent info 160 | [1] Numpy array; agent_array, strings that define which agent is in each location 161 | """ 162 | 163 | tenure = get_nass.tenure_area(self.c.state, self.c.nass_county_list, self.c.nass_year, self.c.agent_variables, 164 | self.c.nass_api_key) 165 | 166 | ages = get_nass.ages(self.c.nass_year, self.c.state, self.c.nass_api_key) 167 | 168 | age_cdf = get_nass.make_age_cdf(ages) 169 | 170 | tenure_cdf = get_nass.make_tenure_cdf(tenure) 171 | 172 | agent_array = init_agent.place_agents(self.Ny, self.Nx, self.lc, 
self.c.key_file, cat_option) 173 | 174 | agent_domain = init_agent.agents(agent_array, self.domain, self.dist2city, tenure_cdf, age_cdf, self.c.switch, 175 | self.Ny, self.Nx, self.lc, self.c.attr, self.c.p) 176 | 177 | return agent_domain, agent_array 178 | 179 | def decisions(self): 180 | """Decision process. 181 | 182 | :return: Updated domain with agent information and land cover choice 183 | :type: Numpy Array 184 | 185 | """ 186 | for i in np.arange(1, self.c.Nt): 187 | 188 | for j in np.arange(self.Ny): 189 | 190 | for k in np.arange(self.Nx): 191 | 192 | if self.agent_domain[j, k].FarmerAgents: 193 | 194 | # assess profit 195 | profit_last, profit_pred = crpdec.assess_profit(self.crop_id_all[i-1, j, k], 196 | self.profits_actual[i-1, j, k], 197 | self.profit_signals[:, i], 198 | self.num_crops, 199 | self.crop_ids) 200 | 201 | # identify the most profitable crop 202 | crop_choice, profit_choice = crpdec.profit_maximizer(self.agent_domain[j, k].FarmerAgents[0].alpha, 203 | self.agent_domain[j, k].FarmerAgents[0].beta, 204 | self.c.fmin, 205 | self.c.fmax, 206 | self.c.n, 207 | profit_last, 208 | self.crop_ids, 209 | profit_pred, 210 | rule=True) 211 | 212 | # decide whether to switch and add random variation to actual profit 213 | self.crop_id_all[i, j, k], self.profits_actual[i, j, k] = crpdec.make_choice(self.crop_id_all[i-1, j, k], 214 | profit_last, 215 | crop_choice, 216 | profit_choice, 217 | seed = False) 218 | 219 | # update agent attributes 220 | self.agent_domain[j, k].FarmerAgents[0].update_age() 221 | if self.c.attr: 222 | if self.agent_domain[j, k].FarmerAgents[0].LandStatus != 2: 223 | self.agent_domain[j, k].FarmerAgents[0].update_switch() 224 | 225 | def plot_results(self): 226 | """Create result plots and save them.""" 227 | 228 | ppf.plot_crop_percent(self.crop_id_all, self.crop_ids, self.c.Nt, self.num_crops, self.c.scale, 229 | self.c.output_dir, self.c.key_file, self.ag) 230 | 231 | ppf.plot_agent_ages(self.agent_domain, self.agent_array, self.Ny, self.Nx, self.c.Nt, 232 | self.c.scale, self.c.output_dir) 233 | 234 | ppf.plot_switching_curves(self.agent_domain, self.agent_array, self.c.fmin, self.c.fmax, self.Ny, self.Nx, 235 | self.c.Nt, self.c.n, self.c.scale, self.c.output_dir, 236 | self.profits_actual[self.c.Nt-1, :, :], self.c.switch) 237 | 238 | ppf.plot_lc(self.crop_id_all, 0, self.c.target_year, self.c.output_dir, self.ag, self.crop_ids, self.num_crops, self.c.Nt, self.c.key_file) 239 | 240 | ppf.plot_lc(self.crop_id_all, 1, self.c.target_year, self.c.output_dir, self.ag, self.crop_ids, self.num_crops, self.c.Nt, self.c.key_file) 241 | ppf.plot_lc(self.crop_id_all, 29, self.c.target_year, self.c.output_dir, self.ag, self.crop_ids, self.num_crops, self.c.Nt, self.c.key_file) 242 | 243 | ppf.plot_price_signals(self.profit_signals, self.c.key_file, self.c.target_year, self.c.Nt, self.c.output_dir, self.c.profits) 244 | 245 | def version_info(self): 246 | v = 'janus-unknown' 247 | try: 248 | v = pkg_resources.require("janus")[0].version 249 | except: 250 | # not installed, *probably* in a development location, try some git 251 | # magic 252 | try: 253 | v = subprocess.check_output(['git', 'describe', '--tags']).strip() 254 | except: 255 | pass 256 | return v 257 | 258 | def save_outputs(self): 259 | """Save outputs as Numpy arrays (backwards compatible) and a netcdf file. 
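The NumPy arrays are written to the configured output directory as 'landcover', 'profits', and 'domain' files named '<name>_<scale>m_<Nt>yr.npy'; the netCDF file is only written when an output file name is set in the configuration.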
260 | 261 | The dimensions of each output NumPy array are [Number of time steps, Ny, Nx] 262 | 263 | For the netcdf file, the initial conditions are prepended to the output 264 | and all grids have the dimenstions Nt, Ny, Nx. 265 | """ 266 | 267 | out_file = os.path.join(self.c.output_dir, '{}_{}m_{}yr.npy') 268 | 269 | # save time series of land cover coverage 270 | np.save(out_file.format('landcover', self.c.scale, self.c.Nt), self.crop_id_all) 271 | 272 | # save time series of profits 273 | np.save(out_file.format('profits', self.c.scale, self.c.Nt), self.profits_actual) 274 | 275 | # save domain, can be used for initialization 276 | np.save(out_file.format('domain', self.c.scale, self.c.Nt), self.agent_domain) 277 | 278 | out_file = self.c.output_file 279 | if out_file == "": 280 | return 281 | years = [self.c.target_year + n for n in range(self.c.Nt)] 282 | 283 | # TODO(kyle): handle appending/assumed restart 284 | nc = netcdf.Dataset(out_file, 'w', format='NETCDF4') 285 | 286 | jmd = nc.createVariable('janus', 'i4') 287 | jmd.version = self.version_info() 288 | 289 | # TODO(kyle): determine what other metadata to write to the output, and 290 | # which are required to enable a restart. This may include data files, 291 | # config files/options, etc. 292 | 293 | now = datetime.now().strftime('%Y%m%dT%H%M%S') 294 | jmd.history = 'created {}'.format(now) 295 | 296 | # Grab metadata from the original landcover file for geo referencing 297 | lc = gdal.Open(self.c.f_init_lc_file) 298 | gt = lc.GetGeoTransform() 299 | # GetProjectionRef() is safe for GDAL 2.x and 3.x, use GetSpatialRef() + 300 | # export as needed for GDAL 3.0+. 301 | srs = lc.GetProjectionRef() 302 | lc = None 303 | 304 | # set gdal based crs 305 | sr = nc.createVariable('spatial_ref', 'i4') 306 | sr.spatial_ref = srs 307 | 308 | # store number of pixel/lines in a more convenient variable 309 | # TODO(kyle): check axis ordering 310 | nx = self.crop_id_all.shape[0] 311 | ny = self.crop_id_all.shape[1] 312 | 313 | # setup the dimensions for the output file. 314 | # TODO(kyle): check on axis ordering 315 | # time is unlimited for appending 316 | dt = nc.createDimension('time', None) 317 | dx = nc.createDimension('x', nx) 318 | dy = nc.createDimension('y', ny) 319 | 320 | time = nc.createVariable('time', 'u2', ('time',)) 321 | time.units = 'years' 322 | time.long_name = 'time' 323 | time[:] = years[:] 324 | 325 | # TODO(kyle): more axis ordering (-dy, dy, etc.), as well as half pixel 326 | # offsets. 327 | x = nc.createVariable('x', 'f8', ('x',)) 328 | x.long_name = 'x coordinate' 329 | x[:] = [gt[0] + (0.5 * gt[1]) + i * gt[1] for i in range(nx)] 330 | y = nc.createVariable('y', 'f8', ('y',)) 331 | y.long_name = 'y coordinate' 332 | y[:] = [gt[3] + (0.5 * gt[5]) + i * gt[5] for i in range(ny)] 333 | 334 | lon = nc.createVariable('lon', 'f8', ('y', 'x')) 335 | lon.long_name = 'longitude' 336 | lon.units = 'degrees' 337 | lat = nc.createVariable('lat', 'f8', ('y', 'x')) 338 | lat.long_name = 'latitude' 339 | lat.units = 'degrees' 340 | 341 | # TODO(kyle): discuss how to handle errors. If ImportFromWkt fails, 342 | # should we write the netcdf file without the geo-referencing? For 343 | # now, just let gdal raise an exception. 
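        # Transform each cell centre from the land cover CRS to EPSG:4326 to fill the lat/lon grids.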
344 | gdal.UseExceptions() 345 | s_srs = osr.SpatialReference() 346 | s_srs.ImportFromWkt(srs) 347 | t_srs = osr.SpatialReference() 348 | t_srs.ImportFromEPSG(4326) 349 | if gdal.VersionInfo() >= '3000000': 350 | t_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) 351 | 352 | ct = osr.CoordinateTransformation(s_srs, t_srs) 353 | for i in range(ny): 354 | for j in range(nx): 355 | #TODO(kyle): do we need 1/2 pixel offsets? 356 | y = gt[3] + (0.5 * gt[5]) + i * gt[5] 357 | x = gt[0] + (0.5 * gt[1]) + j * gt[1] 358 | lon[i, j], lat[i, j], z = ct.TransformPoint(x, y) 359 | 360 | # TODO(kyle): check on range of crop values, cursory inspection showed 361 | # a max of 254, but it could have been the wrong file. 362 | crop = nc.createVariable('crop', 'i2', ('time', 'y', 'x')) 363 | crop.units = 'gcam(?) crop classification' 364 | crop.long_name = 'crop landcover' 365 | # prepend the initial conditions 366 | crop[:] = np.insert(self.crop_id_all, 0, self.lc, axis=0) 367 | crop.grid_mapping = 'spatial_ref' 368 | 369 | profits = nc.createVariable('profits', 'f8', ('time', 'y', 'x')) 370 | profits.units = 'total dollars (per unit area?)' 371 | profits.long_name = 'profit' 372 | # prepend zeros for the initial crop conditions 373 | profits[:] = np.insert(self.profits_actual, 0, np.zeros(self.lc.shape), axis=0) 374 | profits.grid_mapping = 'spatial_ref' 375 | 376 | # TODO(kyle): write the domain data 377 | 378 | nc.close() 379 | # This is a broader issue, discussion needed. 380 | gdal.DontUseExceptions() 381 | 382 | if __name__ == '__main__': 383 | 384 | parser = argparse.ArgumentParser() 385 | 386 | parser.add_argument('-c', '--config_file', type=str, help='Full path with file name and extension to YAML configuration file.') 387 | parser.add_argument('-s', '--switch_params', type=list, help='List of lists for switching averse, tolerant, and neutral parameters (alpha, beta)') 388 | parser.add_argument('-nt', '--nt', type=int, help='Number of time steps') 389 | parser.add_argument('-attr', '--attr', type=str, help='Boolean that determines if switching parameters are based on attributes') 390 | parser.add_argument('-f_init_lc', ' f_init_lc') 391 | 392 | # TODO: number of crops is calculated after doing the GIS pre-processing, if nc is needed for price generation, we might need to adjust this 393 | parser.add_argument('-nc', '--nc', type=int, help='Number of crops') 394 | parser.add_argument('-fmin', '--fmin', type=float, help='The fraction of current profit at which the CDF of the beta distribution is zero') 395 | parser.add_argument('-fmax', '--fmax', type=float, help='The fraction of current profit at which the CDF of the beta distribution is one') 396 | parser.add_argument('-n', '--n', type=int, help='The number of points to generate in the CDF') 397 | parser.add_argument('-seed', '--crop_seed_size', type=int, help='Seed to set for random number generators for unit testing') 398 | parser.add_argument('-yr', '--initialization_yr', type=int, help='Initialization year assocciated with landcover input') 399 | parser.add_argument('-state', '--state', type=str, help='State where NASS data is pulled from, capitalized acronym') 400 | parser.add_argument('-sc', '--scale', type=int, help='Scale of landcover grid in meters. Current options are 1000 and 3000 m') 401 | 402 | parser.add_argument('-av', '--agent_variables', type=list, help='NASS variables to characterize agents with. 
/janus/preprocessing/landcover_preprocessing.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon Nov 19 09:53:01 2018
3 | 
4 | @author: kek25
5 | 
6 | All functions necessary to do GIS pre-processing for Janus
7 | """
8 | 
9 | import gdal
10 | import glob
11 | import numpy as np
12 | import geopandas as gp
13 | import pandas as pd
14 | import os
15 | import json
16 | from osgeo import osr
17 | from joblib import Parallel, delayed
18 | from shapely.geometry import Polygon, MultiPolygon
19 | from fiona.crs import from_epsg
20 | from rasterio.mask import mask
21 | from shapely.ops import cascaded_union
22 | import rasterio
23 | 
24 | 
25 | # =============================================================================#
26 | # PREAMBLE AND PATH DEFINITIONS
27 | # =============================================================================#
28 | 
29 | class CdlDataStruct:
30 |     """Attributes of input CDL data: location, file name, and all georeferencing information
31 |     :param cdl_path: Full path to the CDL data
32 |     :type cdl_path: String
33 |     :param cdl_infile: Filename of CDL data
34 |     :type cdl_infile: String
35 | 
36 |     """
37 | 
38 |     # Constructor requires the path and file name of the input CDL data
39 |     def __init__(self, cdl_path, cdl_infile):
40 |         self.cdl_path = cdl_path
41 |         self.cdl_infile = cdl_infile
42 | 
43 |     def set_cdl_proj(self, geo_transform, projection, pixel_size):
44 |         """ Add CDL geographic transformation and projection information
45 |         :param geo_transform: Transformation information
46 |         :type geo_transform: String
47 |         :param projection: Projection of data
48 |         :type projection: String
49 |         :param pixel_size: Size of pixels in meters
50 |         :type pixel_size: Int
51 | 
52 |         """
53 |         self.cdl_geotransform = geo_transform
54 |         self.cdl_projection = projection
55 |         self.cdl_pixelsize = pixel_size
56 | 
57 |     def set_cdl_grid(self, cdl_grid):
58 |         """Add the 2D data to the class structure
59 |         :param cdl_grid: Land cover data
60 |         :type cdl_grid: Numpy Array
61 |         """
62 |         self.cdl_grid = cdl_grid
63 | 
64 |     def set_cdl_stats(self, cdl_stats):
65 |         """ Add the stats calculated to the class
66 |         :param cdl_stats: Number of cells that have each crop type
67 |         :type cdl_stats: Numpy Array
68 |         """
69 |         self.cdl_stats = cdl_stats
70 | 
71 | 
72 | class GCAM_DataStruct:
73 |     """Attributes of the GCAM data: location, file name, and all georeferencing information
74 |     :param gcam_path: Full path to the GCAM data
75 |     :type gcam_path: String
76 |     :param gcam_outfile: Filename to save the GCAM data
77 |     :type gcam_outfile: String
78 | 
79 |     """
80 |     def __init__(self, gcam_path, gcam_outfile):
81 |         self.gcam_path = gcam_path
82 |         self.gcam_outfile = gcam_outfile
83 | 
84 |     def set_gcam_proj(self, geo_transform, projection,
pixel_size): 85 | """ Set the projection of the GCAM data 86 | :param geo_transform: Transformation information 87 | :type geo_transform: String 88 | :param projection: Projection of data 89 | :type projection: String 90 | :param pixel_size: Size of pixels in meters 91 | :type pixel_size: Int 92 | """ 93 | self.gcam_geotransform = geo_transform 94 | self.gcam_projection = projection 95 | self.gcam_pixelsize = pixel_size 96 | 97 | def set_gcam_stats(self, gcam_stats): 98 | """ Add the stats calculated to the class 99 | :param gcam_stats: Number of cells that have each crop type 100 | :type gcam_stats: Numpy Array 101 | """ 102 | self.gcam_stats = gcam_stats 103 | 104 | def set_gcam_grid(self, gcam_grid): 105 | """Add the reclassified 2D data to the class structure 106 | :param gcam_grid: Land cover data 107 | :type gcam_grid: Numpy Array 108 | """ 109 | self.gcam_grid = gcam_grid 110 | 111 | 112 | # =============================================================================# 113 | # FUNCTION DEFINITIONS 114 | # =============================================================================# 115 | 116 | 117 | def read_arc_grid(cdl_struct): 118 | """ Reads in ArcGrid file for processing 119 | :param cdl_struct: CDL class 120 | :type cdl_struct: Class 121 | """ 122 | 123 | # Construct the full name of the CDL input ArcGrid file 124 | cdl_file = os.path.join(cdl_struct.cdl_path, cdl_struct.cdl_infile) 125 | 126 | # Open the CDL input file using GDAL 127 | cdl_input = gdal.Open(cdl_file) 128 | cdl_struct.set_cdl_proj(cdl_input.GetGeoTransform(), cdl_input.GetProjection(), cdl_input.GetGeoTransform()[1]) 129 | 130 | cdl_grid = np.float64(cdl_input.ReadAsArray()) 131 | cdl_grid[cdl_grid == -9999] = np.nan 132 | cdl_struct.set_cdl_grid(cdl_grid) 133 | 134 | # Close CDL data set to save memory 135 | cdl_input = None 136 | 137 | return 138 | 139 | 140 | def cdl2gcam(cdl_struct, cdl_cat, gcam_struct, gcam_cat): 141 | """ Convert raster of CDL land cover to GCAM categories 142 | 143 | :param cdl_struct: Raster of CDL land cover 144 | :type cdl_struct: Class 145 | 146 | :param cdl_cat: CDL input crop categories 147 | :type cdl_cat: Vector 148 | 149 | :param gcam_struct: Raster for GCAM land cover 150 | :type gcam_struct: Class 151 | 152 | :param gcam_cat: GCAM output crop categories 153 | :type gcam_cat: Vector 154 | 155 | :return: New land cover raster with GCAM categories 156 | :type: Class 157 | 158 | """ 159 | 160 | cdl_stats = np.zeros(132) 161 | gcam_stats = np.zeros(28) 162 | 163 | gcam_grid = np.nan * np.ones(cdl_struct.cdl_grid.shape) # new blank np array 164 | for i in np.arange(cdl_cat.size): # unique cdl categories 165 | indx, indy = np.where(cdl_struct.cdl_grid == cdl_cat[i]) 166 | gcam_grid[indx, indy] = gcam_cat[i] 167 | cdl_stats[i] = indx.size 168 | 169 | for i in np.arange(28): # count of each gcam category 170 | indx, indy = np.where(gcam_grid == i + 1) 171 | gcam_stats[i] = indx.size 172 | 173 | cdl_struct.set_cdl_stats(cdl_stats) 174 | 175 | gcam_struct.set_gcam_proj(cdl_struct.cdl_geotransform, cdl_struct.cdl_projection, cdl_struct.cdl_pixelsize) 176 | gcam_struct.set_gcam_stats(gcam_stats) 177 | gcam_struct.set_gcam_grid(gcam_grid) 178 | 179 | return 180 | 181 | 182 | def save_gcam_grid(gcam_struct): 183 | """ Creates outfile name, applies correct projection and saves raster 184 | 185 | :param gcam_struct: Input GCAM structure 186 | :type: Class 187 | 188 | :return: Saved raster file 189 | :type: Tiff 190 | 191 | """ 192 | 193 | gcam_grid = gcam_struct.gcam_grid 194 | nrows, ncols 
= np.shape(gcam_grid) 195 | 196 | gcam_outfile = os.path.join(gcam_struct.gcam_path, gcam_struct.gcam_outfile) 197 | 198 | gcam_driver = gdal.GetDriverByName('Gtiff') 199 | gcam_gdal = gcam_driver.Create(gcam_outfile, ncols, nrows, 1, gdal.GDT_Float32) 200 | 201 | proj = osr.SpatialReference() 202 | proj.ImportFromEPSG(4326) # Needed as an intermediate because no initial projection defined 203 | gcam_gdal.SetProjection(proj.ExportToWkt()) 204 | gcam_gdal.SetGeoTransform(gcam_struct.gcam_geotransform) 205 | gcam_gdal.GetRasterBand(1).WriteArray(gcam_struct.gcam_grid) 206 | gdal.Warp(gcam_outfile, gcam_gdal, dstSRS='EPSG:32611') 207 | 208 | gcam_gdal.FlushCache() 209 | gcam_gdal = None 210 | 211 | return 212 | 213 | 214 | def c2g(cdl_gcam_keyfile, gcam_output_path, cdl_input_path, conversion_id): 215 | """ Converts CDL categories to GCAM categories 216 | 217 | :param cdl_gcam_keyfile: File that links CDL categories to new GCAM categories, users may modify this for 218 | inclusion of local crops 219 | :type cdl_gcam_keyfile: CSV 220 | 221 | :param gcam_output_path: Path to save gcam output 222 | :type gcam_output_path: String 223 | 224 | :param cdl_input_path: Path to raw CDL data 225 | :type cdl_input_path: String 226 | 227 | :param conversion_id: String specifying which GCAM categories to use, options are 'local_GCAM_id' or 'GCAM_id' 228 | for regular GCAM categories 229 | :type conversion_id: String 230 | 231 | :return: Saved land cover rasters with user defined GCAM categories 232 | :type: Tiff 233 | """ 234 | 235 | # =========================================================================# 236 | # 0. Read in category data and create vectors # 237 | # =========================================================================# 238 | 239 | cdl2gcam_key = pd.read_csv(cdl_gcam_keyfile, sep=',') 240 | cdl_cat = cdl2gcam_key['CDL_id'].values 241 | gcam_cat = cdl2gcam_key[conversion_id].values 242 | 243 | # =========================================================================# 244 | # 1. Initialize a list of CDL structures for analysis # 245 | # =========================================================================# 246 | files = glob.glob(os.path.join(cdl_input_path, 'cdl*.txt')) 247 | cdl_data = [] 248 | gcam_data = [] 249 | for file in files: 250 | # Initialize CDL data structures with paths and file names 251 | cdl_path = os.path.dirname(file) 252 | cdl_infile = os.path.basename(file) 253 | cdl_data.append(CdlDataStruct(cdl_path, cdl_infile)) 254 | 255 | # Initialize GCAM data structures with paths and file names 256 | gcam_outfile = cdl_infile.replace('cdl', 'gcam') 257 | gcam_outfile = gcam_outfile.replace('txt', 'tiff') 258 | gcam_data.append(GCAM_DataStruct(gcam_output_path, gcam_outfile)) 259 | 260 | # =========================================================================# 261 | # 2a. Read in all the CDL files and store data in cdl_dataStruct # 262 | # =========================================================================# 263 | Parallel(n_jobs=6, verbose=60, backend='threading')(delayed(read_arc_grid)(cdl_data[i]) 264 | for i in np.arange(len(cdl_data))) 265 | 266 | # =========================================================================# 267 | # 2b. 
Perform the CDL-GCAM category conversion # 268 | # =========================================================================# 269 | Parallel(n_jobs=6, verbose=10, backend='threading')(delayed(cdl2gcam)(cdl_data[i], cdl_cat, gcam_data[i], gcam_cat) 270 | for i in np.arange(len(cdl_data))) 271 | 272 | # =========================================================================# 273 | # 2c. Save re categorized GCAM grids to files # 274 | # =========================================================================# 275 | Parallel(n_jobs=6, verbose=30, backend='threading')(delayed(save_gcam_grid)(gcam_data[i]) 276 | for i in np.arange(len(cdl_data))) 277 | 278 | # =========================================================================# 279 | # 3. Create Arrays of Results 280 | # =========================================================================# 281 | f = len(files) 282 | cdl_stats = np.zeros((132, f)) 283 | gcam_stats = np.zeros((28, f)) 284 | 285 | for i in np.arange(f): 286 | cdl_stats[:, i] = cdl_data[i].cdl_stats 287 | gcam_stats[:, i] = gcam_data[i].gcam_stats 288 | np.savetxt(os.path.join(gcam_output_path, "cdl_initial.csv"), cdl_stats, delimiter=",") 289 | np.savetxt(os.path.join(gcam_output_path, "gcam_initial.csv"), gcam_stats, delimiter=",") 290 | 291 | 292 | # =============================================================================# 293 | # Aggregate to scale of interest 294 | # =============================================================================# 295 | 296 | 297 | def aggregate_grid(input_file, scale, year): 298 | """ Create grid that land cover data is saved in when aggregating from smaller scale to larger scale 299 | 300 | :param input_file: Full path and filename of input land cover 301 | :type input_file: String 302 | 303 | :param scale: Resolution to aggregate data to in meters, suggested at 1000 or 3000 304 | :type scale: Int 305 | 306 | :param year: Year that land cover is being initialized from 307 | :type year: Int 308 | 309 | :return: New land cover raster at a specified resolution 310 | :type: Tiff 311 | 312 | """ 313 | 314 | # Open the GeoTiff based on the input path and file 315 | src_ds = gdal.Open(input_file) 316 | 317 | # Create the name of the output file by modifying the input file 318 | gcam_write_file = 'gcam_' + str(int(scale)) + '_domain_' + str(int(year)) + '.tiff' 319 | 320 | # Get key info on the source data set 321 | src_ncols = src_ds.RasterXSize 322 | src_nrows = src_ds.RasterYSize 323 | 324 | src_geot = src_ds.GetGeoTransform() 325 | src_proj = src_ds.GetProjection() 326 | src_res = src_ds.GetGeoTransform()[1] 327 | 328 | agg_factor = scale / src_res 329 | 330 | dst_ncols = int(src_ncols / agg_factor) 331 | dst_nrows = int(src_nrows / agg_factor) 332 | 333 | dst_driver = gdal.GetDriverByName('Gtiff') 334 | output = os.path.join(os.path.dirname(input_file), gcam_write_file) 335 | dst_ds = dst_driver.Create(output, dst_ncols, dst_nrows, 1, gdal.GDT_Float32) 336 | 337 | dst_geot = (src_geot[0], src_geot[1] * agg_factor, src_geot[2], src_geot[3], src_geot[4], src_geot[5] * agg_factor) 338 | 339 | dst_ds.SetGeoTransform(dst_geot) 340 | dst_ds.SetProjection(src_proj) 341 | 342 | gdal.ReprojectImage(src_ds, dst_ds, src_proj, src_proj, gdal.GRA_Mode) 343 | 344 | src_ds = None 345 | dst_ds = None 346 | 347 | return 348 | 349 | 350 | # =============================================================================# 351 | # Run aggregation function in parallel 352 | # =============================================================================# 
353 | 354 | 355 | def agg_gcam(scale, lc_dir, year): 356 | """Runs aggregation function in parallel 357 | 358 | :param scale: Resolution to aggregate data to in meters, suggested at 1000 or 3000 359 | :type scale: Int 360 | 361 | :param lc_dir: Directory where GCAM land cover data is stored 362 | :type lc_dir: String 363 | 364 | :param year: Year that land cover is being initialized from 365 | :type year: Int 366 | 367 | :return: saved land cover data at new resolution 368 | :type: Tiff 369 | """ 370 | 371 | gcam_read_files = glob.glob(os.path.join(lc_dir, 'gcam_' + str(int(year)) + '*.tiff')) 372 | 373 | Parallel(n_jobs=4, verbose=60, backend='threading')(delayed(aggregate_grid)(file, scale, year) 374 | for file in gcam_read_files) 375 | 376 | 377 | # =============================================================================# 378 | # Create a set of polygons for entire domain 379 | # =============================================================================# 380 | 381 | 382 | def grid2poly(year, scale, gcam_dir, out_path): 383 | """Creates a grid of polygons for holding information in each cell 384 | 385 | :param year: Initiation year for identifying GCAM raster 386 | :type year: Int 387 | 388 | :param scale: Scale of grid for identifying correct GCAM raster 389 | :type scale: Int 390 | 391 | :param gcam_dir: Location of GCAM file 392 | :type gcam_dir: String 393 | 394 | :param out_path: path for output file 395 | :type out_path: String 396 | 397 | :return: Saved grid of polygon 398 | :type: ESRI Shapefile 399 | """ 400 | 401 | grid_file = os.path.join(gcam_dir, 'gcam_' + str(int(scale)) + '_domain_' + str(int(year)) + '.tiff') 402 | 403 | src = gdal.Open(grid_file) 404 | srcarray = src.ReadAsArray().astype(np.float) 405 | 406 | x_index = np.arange(srcarray.shape[1]) 407 | y_index = np.arange(srcarray.shape[0]) 408 | (upper_left_x, x_size, x_rotation, upper_left_y, y_rotation, y_size) = src.GetGeoTransform() 409 | x_coords = x_index * x_size + upper_left_x + (x_size / 2) # add half the cell size 410 | y_coords = y_index * y_size + upper_left_y + (y_size / 2) # to centre the point 411 | xc, yc = np.meshgrid(x_coords, y_coords) 412 | 413 | # create a list of all the polygons in the grid 414 | vert = list() 415 | for i in np.arange(srcarray.shape[1] - 1): 416 | for j in np.arange(srcarray.shape[0] - 1): 417 | vert.append([[xc[j, i], yc[j, i]], [xc[j + 1, i], yc[j + 1, i]], [xc[j + 1, i + 1], yc[j + 1, i + 1]], 418 | [xc[j, i + 1], yc[j, i + 1]]]) 419 | 420 | # create list of polygons 421 | polygons = [Polygon(vert[i]) for i in np.arange(len(vert))] 422 | 423 | # convert them to formats for exporting 424 | polys = gp.GeoSeries(MultiPolygon(polygons)) 425 | polyagg = gp.GeoDataFrame(geometry=polys) 426 | polyagg.crs = from_epsg(32611) 427 | 428 | # -------------------------# 429 | # Save Output # 430 | # -------------------------# 431 | out_file_name = os.path.join(out_path, 'domain_poly_' + str(int(scale)) + '.shp') 432 | polyagg.to_file(filename=out_file_name, driver="ESRI Shapefile") 433 | 434 | 435 | # ============================================================================= # 436 | # Get extent of modeling domain # 437 | # ============================================================================= # 438 | 439 | 440 | def get_extent(counties_shp, county_list, scale, out_path): 441 | """Create a grid of the extent based on counties and scale of interest. 
442 | This will only be used if a user wants to use and clip other geospatial data such as elevation 443 | 444 | :param counties_shp: Geopandas data frame of counties 445 | :type counties_shp: Geopandas data frame 446 | 447 | :param county_list: List of counties in the domain of interest 448 | :type county_list: List of strings 449 | 450 | :param scale: Grid scale of output, can only be 3000 or 1000 (meters) 451 | :type scale: Int 452 | 453 | :param out_path: File path to processed lc data folder 454 | :type out_path: String 455 | 456 | :return: Grid of polygons for the domain of interest 457 | :type: ESRI Shapefile 458 | """ 459 | 460 | if scale == 3000: 461 | srb_poly = gp.read_file(os.path.join(out_path, 'domain_poly_3000.shp')) 462 | 463 | elif scale == 1000: 464 | srb_poly = gp.read_file(os.path.join(out_path, 'domain_poly_1000.shp')) 465 | 466 | # this returns geometry of the union, no longer distinguishes counties - see issue #1 467 | 468 | # this is the row index, not the "COUNTY_ALL" index 469 | extent = counties_shp['geometry'].loc[county_list].unary_union 470 | extent_poly = srb_poly[srb_poly.geometry.intersects(extent)] 471 | out_filename = 'extent_' + str(int(scale)) + '.shp' 472 | extent_poly.to_file(os.path.join(out_path, out_filename)) 473 | 474 | 475 | # ------------------------------------------------------------------------------------------------ 476 | # Clip GCAM coverage to the counties of interest at scale of interest & save for later use # 477 | # ------------------------------------------------------------------------------------------------ 478 | 479 | def get_gcam(counties_shp, county_list, gcam_path, out_path): 480 | """Clip GCAM coverage to the counties of interest at scale of interest. 481 | 482 | :param counties_shp: Geopandas data frame for counties data 483 | :type counties_shp: Geopandas data frame 484 | 485 | :param county_list: List of counties in the domain of interest 486 | :type county_list: List of strings 487 | 488 | :param gcam_path: Full path with file name and extension to the GCAM raster 489 | :type gcam_path: String 490 | 491 | :param out_path: Path to the directory where the file will be saved for use in Janus 492 | :type out_path: String 493 | 494 | :return: Land cover data clipped to domain of interest 495 | :type: Tiff 496 | """ 497 | 498 | data = rasterio.open(gcam_path) 499 | extent_shp = counties_shp['geometry'].loc[county_list] 500 | boundary = gp.GeoSeries(cascaded_union(extent_shp)) 501 | coords = [json.loads(boundary.to_json())['features'][0][ 502 | 'geometry']] # parses features from GeoDataFrame the way rasterio wants them 503 | out_img, out_transform = mask(dataset=data, shapes=coords, crop=True) 504 | out_meta = data.meta.copy() 505 | 506 | # update metadata with new transformation information 507 | out_meta.update({"driver": "GTiff", 508 | "height": out_img.shape[1], 509 | "width": out_img.shape[2], 510 | "transform": out_transform}) 511 | 512 | # Merge original file name with init_landcover to denote that it is the initial land cover data for Janus 513 | in_file = os.path.basename(gcam_path) 514 | out_filename = os.path.join(out_path, 'init_landcover_' + in_file) 515 | 516 | # Save clipped land cover coverage 517 | out_tiff = rasterio.open(out_filename, 'w', **out_meta) 518 | out_tiff.write(np.squeeze(out_img, 0), 1) 519 | out_tiff.close() 520 | 521 | return 522 | -------------------------------------------------------------------------------- /janus/preprocessing/generate_synthetic_prices.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Sep 16 14:23:05 2019 5 | 6 | @author: lejoflores 7 | """ 8 | 9 | # TODO: Need to create RunProfitGenerator in config file. If value 0 = do not run GenerateSyntheticProfits. 10 | # (must specify profit profiles to read in). If 1 = run this script 11 | # (must specify profit generator parameter input file AND associate output file, which will then be 12 | # used to run the actual model) 13 | 14 | import numpy as np 15 | import sys 16 | import csv 17 | 18 | NPRICE_FUNCTIONS = 3 # Number of profit functions in this script. If the user 19 | # wants to add additional functions, this must be increased 20 | # to reflect the total number of synthetic profit functions 21 | # in this script. 22 | 23 | # =========================================================================== # 24 | # # 25 | # GeneratePrice_linear(Nt, Pi, Pf, perturb, s_p=0.0) # 26 | # # 27 | # AUTHOR: Lejo Flores # 28 | # # 29 | # PURPOSE: The purpose of this function is to generate a synthetic price # 30 | # signal that is Nt time steps long and changes linearly from the # 31 | # initial price, Pi, to the final price, Pf. Optionally, the user # 32 | # has the option to perturb that synthetic price with gaussian, # 33 | # uncorrelated, zero-mean noise with a standard deviation of s_p # 34 | # # 35 | # INPUTS: # 36 | # # 37 | # Nt = The number of time steps for which to create prices. # 38 | # Pi = The price at the start of the time series. # 39 | # Pf = The price at the end of the time series. # 40 | # perturb = A flag noting whether to perturb the times series with # 41 | # gaussian, uncorrelated, zero mean noise. 0 = false. # 42 | # 1 = true. # 43 | # s_p = The standard deviation of the noise that will be added # 44 | # to the prices if perturb is set to true. # 45 | # # 46 | # RETURNS: An (Nt x 1) numpy array of prices that vary linearly from Pi to # 47 | # Pt, optionally perturbed with gaussian, zero mean noise. # 48 | # # 49 | # ALGORITHM: # 50 | # # 51 | # (1) Error trap to ensure that Pi and Pf are greater than 0. # 52 | # (2) Use the numpy linspace function to create an (Nt x 1) array of # 53 | # prices that vary from Pi at the beginning of the array to Pf # 54 | # at the end of the array. # 55 | # (3) Check if the perturb flag is set to true # 56 | # (4) If true, add gaussian, uncorrelated, zero mean, s_p standard # 57 | # deviation noise to the vector. If false, do nothing. # 58 | # (5) Return the (Nt x 1) numpy array. # 59 | # # 60 | #=============================================================================# 61 | def GeneratePrice_linear(Nt, Pi, Pf, perturb, s_p=0.0): 62 | """Description 63 | 64 | :param Nt: Number of time steps in the model 65 | :param Pi: Profit at the beginning of the time series 66 | :param Pf: Profit at the end of the time series 67 | :param perturb: Perturbation flag. 0 = no random perturbations to profit. 
1 = add zero mean, uncorrelated noise to every time step 68 | :param s_p: Standard deviation of noise added to profit signal at each time step (default = 0.0) 69 | 70 | :return: An Nt x 1 numpy array of profit 71 | """ 72 | assert Pi >= 0.0, 'generate_synthetic_prices.py ERROR: Pi must be greater than or equal to 0.0' 73 | assert Pf >= 0.0, 'generate_synthetic_prices.py ERROR: Pf must be greater than or equal to 0.0' 74 | 75 | P = np.linspace(Pi, Pf, num=Nt).reshape((Nt, 1)) 76 | if (perturb == 1): 77 | P += np.random.normal(loc=0.0, scale=s_p, size=(Nt, 1)) 78 | 79 | return P 80 | 81 | 82 | #=============================================================================# 83 | # # 84 | # GeneratePrice_step(Nt, Pi, Pf, t_step, perturb, s_p=0.0) # 85 | # # 86 | # AUTHOR: Lejo Flores # 87 | # # 88 | # PURPOSE: The purpose of this function is to generate a synthetic price # 89 | # signal that is Nt time steps long and undergoes a step-change in # 90 | # price from from the initial price, Pi, to the final price, Pf. # 91 | # Optionally, the user can perturb that synthetic price with # 92 | # gaussian, uncorrelated, zero-mean noise with a standard deviation # 93 | # of s_p. # 94 | # # 95 | # INPUTS: # 96 | # # 97 | # Nt = The number of time steps for which to create prices. # 98 | # Pi = The price at the start of the time series. # 99 | # Pf = The price at the end of the time series. # 100 | # t_step = The time (as a fraction of the number of time steps Nt) # 101 | # at which the price change occurs. # 102 | # perturb = A flag noting whether to perturb the times series with # 103 | # gaussian, uncorrelated, zero mean noise. 0 = false. # 104 | # 1 = true. # 105 | # s_p = The standard deviation of the noise that will be added # 106 | # to the prices if perturb is set to true. # 107 | # # 108 | # RETURNS: An (Nt x 1) numpy array of prices that undergo a step-change from # 109 | # Pi to Pt, optionally perturbed with gaussian, zero mean noise. # 110 | # # 111 | # ALGORITHM: # 112 | # # 113 | # (1) Error trap to ensure that t_step is valid (0 < t_step < 1) # 114 | # and that Pi and Pf are greater than or equal to zero. # 115 | # (2) Create an (Nt x 1) numpy array of zeros as an empty container. # 116 | # (3) Set the first (int)(t_step * Nt) prices equal to Pi. # 117 | # (4) Set the last (int)(t_step * Nt) to Nt prices equal to Pf. # 118 | # (5) Check if the perturb flag is set to true # 119 | # (6) If true, add gaussian, uncorrelated, zero mean, s_p standard # 120 | # deviation noise to the vector. If false, do nothing. # 121 | # (7) Return the (Nt x 1) numpy array. # 122 | # # 123 | #=============================================================================# 124 | def GeneratePrice_step(Nt, Pi, Pf, t_step, perturb, s_p=0.0): 125 | """Description 126 | 127 | :param Nt: Number of time steps in the model 128 | :param Pi: Profit prior to the step change 129 | :param Pf: Profit after the step change 130 | :param t_step: (0.0 to 1.0) the time at which the step change occurs as a fraction of Nt 131 | :param perturb: Perturbation flag. 0 = no random perturbations to profit. 
1 = add zero mean, uncorrelated noise to every time step 132 | :param s_p: Standard deviation of noise added to profit signal at each time step (default = 0.0) 133 | 134 | :return: An Nt x 1 numpy array of profit 135 | """ 136 | assert Pi >= 0.0, 'generate_synthetic_prices.py ERROR: Pi must be greater than or equal to 0.0' 137 | assert Pf >= 0.0, 'generate_synthetic_prices.py ERROR: Pf must be greater than or equal to 0.0' 138 | assert t_step > 0.0, 'generate_synthetic_prices.py ERROR: Step price change time is less than 0.0' 139 | assert t_step < 1.0, 'generate_synthetic_prices.py ERROR: Step price change time is greater than 1.0' 140 | 141 | P = np.zeros((Nt, 1)) 142 | P[0:(int(t_step * Nt))] = Pi 143 | P[(int(t_step * Nt)):] = Pf 144 | 145 | if (perturb == 1): 146 | P += np.random.normal(loc=0.0, scale=s_p, size=(Nt, 1)) 147 | 148 | return P 149 | 150 | # TODO: have Lejo double check n_period description 151 | #=============================================================================# 152 | # # 153 | # GeneratePrice_periodic(Nt, Pmag, Pamp, n_period, perturb, s_p=0.0) # 154 | # # 155 | # AUTHOR: Lejo Flores # 156 | # # 157 | # PURPOSE: The purpose of this function is to generate a synthetic price # 158 | # signal that is Nt time steps long and varies periodically about a # 159 | # level Pmag, with an amplitude Pamp, over n_periods during the Nt # 160 | # time steps. Optionally, the user can perturb that synthetic price # # 161 | # with gaussian, uncorrelated, zero-mean noise with a standard # 162 | # deviation of s_p. # 163 | # # 164 | # INPUTS: # 165 | # # 166 | # Nt = The number of time steps for which to create prices. # 167 | # Pmag = The level about which prices fluctuate. # 168 | # Pamp = The amplitude of the fluctuation. # 169 | # n_period = The number of periods during the Nt time steps # 170 | # that the price fluctuations occur. # 171 | # perturb = A flag noting whether to perturb the times series with # 172 | # gaussian, uncorrelated, zero mean noise. 0 = false. # 173 | # 1 = true. # 174 | # s_p = The standard deviation of the noise that will be added # 175 | # to the prices if perturb is set to true. # 176 | # # 177 | # RETURNS: An (Nt x 1) numpy array of prices that fluctuates about Pmag with # 178 | # an amplitude Pamp, optionally perturbed with gaussian, zero mean # 179 | # noise. # 180 | # # 181 | # ALGORITHM: # 182 | # # 183 | # (1) Error trap to ensure that Pmag and Pamp are greater than 0. # 184 | # (2) Create an (Nt x 1) numpy array that is a sinusoidal signal # 185 | # with n_period periods over Nt time steps. # 186 | # (3) Scale and shift the sinusoid such that it fluctuates about # 187 | # Pmag with an amplitude Pamp. # 188 | # (4) Check if the perturb flag is set to true # 189 | # (5) If true, add gaussian, uncorrelated, zero mean, s_p standard # 190 | # deviation noise to the vector. If false, do nothing. # 191 | # (6) Return the (Nt x 1) numpy array. # 192 | # # 193 | #=============================================================================# 194 | def GeneratePrice_periodic(Nt, Pmag, Pamp, n_period, perturb, s_p=0.0): 195 | """Description 196 | 197 | :param Nt: Number of time steps in the model 198 | :param Pmag: Level about which profit fluctuates through time 199 | :param Pamp: Amplitude of profit fluctuation 200 | :param n_period: Number of periods during the Nt time steps. Can be negative to reflect sinusoid about Y axis. 201 | :param perturb: Perturbation flag. 0 = no random perturbations to profit. 
1 = add zero mean, uncorrelated noise to every time step 202 | :param s_p: Standard deviation of noise added to profit signal at each time step (default = 0.0) 203 | 204 | :return: An Nt x 1 numpy array of profit 205 | """ 206 | 207 | assert Pmag > 0.0, 'generate_synthetic_prices.py ERROR: Pmag must be greater than 0' 208 | assert Pamp > 0.0, 'generate_synthetic_prices.py ERROR: Pamp must be greater than 0' 209 | 210 | x = np.linspace(0.0, n_period * 2 * np.pi, num=Nt).reshape((Nt, 1)) 211 | P = Pmag + Pamp * np.sin(x) 212 | 213 | if (perturb == 1): 214 | P += np.random.normal(loc=0.0, scale=s_p, size=(Nt, 1)) 215 | 216 | return P 217 | 218 | 219 | #=============================================================================# 220 | # # 221 | # main(argv) # 222 | # # 223 | # AUTHOR: Lejo Flores # 224 | # # 225 | # PURPOSE: The purpose of this main function is to read in input from a CSV # 226 | # file that contains instructions on how to generate synthetic # 227 | # prices for a variety of different crops and write those synthetic # 228 | # crop prices to a CSV file that is then read as input by the agent # 229 | # based model. # 230 | # INPUTS: # 231 | # # 232 | # argv = A character array that can be passed from the command # 233 | # line. It should contain 5 character strings, in order. # 234 | # argv[0] = The name of this function (generate_synthetic_prices). # 235 | # argv[1] = The string representation of the number of crops for # 236 | # which prices will be synthesized. # 237 | # argv[2] = The string representation of the number of time steps for# 238 | # which prices will be generated for every crop. # 239 | # argv[3] = The string containing the name of the CSV file that # 240 | # contains information on what mathematical form crop # 241 | # prices will take, and the associated parameters for that # 242 | # mathematical form for every crop. # 243 | # argv[4] = The string containing the name of the CSV file to which # 244 | # generated crop prices will be written. # 245 | # # 246 | # RETURNS: NULL - the output synthetic prices will be written to the CSV file # 247 | # contained in argv[4]. # 248 | # # 249 | # ALGORITHM: # 250 | # # 251 | # (1) Error trap for correct number of command line arguments, print # 252 | # usage statement. # 253 | # (2) Save command line arguments to variables, and error trap for # 254 | # validity of input. # 255 | # (3) Open the input file containing information on how prices will # 256 | # be generated, error trap. # 257 | # (4) Parse the input CSV file and read in the parameters. # 258 | # (5) Call the appropriate price generating function for each crop # 259 | # reading the associated parameters from that row of the CSV # 260 | # file. # 261 | # (6) Repeat step (5) for all crops. # 262 | # (7) Error trap that the number of crops for which prices are # 263 | # generated corresponds to the number of rows read in the input # 264 | # CSV file. # 265 | # (8) Open and write the generated crop prices to the output CSV # 266 | # file whose name is provided in the input. Note that if the # 267 | # output file exists, it will be overwritten. # 268 | # (9) Close the output file. This is the end of the function, there # 269 | # is no return. 
#
270 | #
271 | #=============================================================================#
272 | def main(argv):
273 |     """Description
274 | 
275 |     :param argv: Array of 5 command line arguments passed from the __main__ function
276 |     :param argv[0]: Name of this function (generate_synthetic_prices)
277 |     :param argv[1]: Number of crops to create profit time series for
278 |     :param argv[2]: Number of time steps in the time series
279 |     :param argv[3]: Name of the CSV file containing crop information and generator-function parameters (include the path if the file is in a different directory)
280 |     :param argv[4]: Name of the CSV file to which the profit time series will be written (include the path if the output goes to a different directory)
281 |     :return: null (output written to file)
282 |     """
283 | 
284 |     if (len(argv) != 5):
285 |         print('\nERROR: Incorrect number of command line arguments\n')
286 |         print('Usage: generate_synthetic_prices.py <Nc> <Nt> <CropFileIn> <CropFileOut>\n')
287 |         print('\tgenerate_synthetic_prices.py = Name of this python script')
288 |         print('\t<Nc>          = Number of crops to synthesize prices for')
289 |         print('\t<Nt>          = Number of time steps to generate prices for')
290 |         print('\t<CropFileIn>  = CSV file containing crop information')
291 |         print('\t                (see documentation)')
292 |         print('\t<CropFileOut> = CSV file in which to save output prices\n')
293 |         sys.exit()
294 | 
295 |     Nc = int(argv[1])
296 |     Nt = int(argv[2])
297 |     CropFileIn = argv[3]
298 |     CropFileOut = argv[4]
299 | 
300 |     # Error traps
301 |     assert Nc > 0, 'generate_synthetic_prices.py ERROR: Negative number of crops encountered'
302 |     assert Nt > 0, 'generate_synthetic_prices.py ERROR: Negative number of time steps encountered'
303 |     assert Nc <= 28, 'generate_synthetic_prices.py ERROR: Too many crops encountered'
304 | 
305 |     # Try opening the CSV file provided as input
306 |     try:
307 |         fp = open(CropFileIn)
308 |     except IOError as e:
309 |         print('generate_synthetic_prices.py ERROR({0}): {1}'.format(e.errno, e.strerror))
310 |         sys.exit()
311 |     csv_fp = csv.reader(fp)
312 | 
313 |     crop_names = []
314 |     crop_ids = []
315 | 
316 |     CropCount = 0
317 | 
318 |     for row in csv_fp:
319 | 
320 |         CropCount += 1
321 | 
322 |         assert isinstance(row[0], str), 'generate_synthetic_prices.py ERROR: Crop name not string'
323 |         crop_names.append(row[0])
324 | 
325 |         assert int(row[1]) > 0, 'generate_synthetic_prices.py ERROR: Negative crop ID number'
326 |         crop_ids.append(int(row[1]))
327 | 
328 |         assert int(row[2]) > 0, 'generate_synthetic_prices.py ERROR: Invalid price function behavior flag'
329 |         assert int(row[2]) < (
330 |             NPRICE_FUNCTIONS + 1), 'generate_synthetic_prices.py ERROR: Invalid price function behavior flag'
331 | 
332 |         price_fxn_type = int(row[2])
333 | 
334 |         if price_fxn_type == 1:  # Linear ramp (use for linearly increasing, decreasing, or constant prices)
335 |             assert len(
336 |                 row) == 7, 'generate_synthetic_prices.py ERROR: Incorrect number of parameters for linear ramp in row ' + str(
337 |                 CropCount)
338 | 
339 |             Pi = float(row[3])
340 |             Pf = float(row[4])
341 |             perturb = int(row[5])
342 |             if perturb == 1:
343 |                 s_p = float(row[6])
344 |             else:
345 |                 s_p = 0.0
346 | 
347 |             P = GeneratePrice_linear(Nt, Pi, Pf, perturb, s_p)
348 | 
349 |         elif price_fxn_type == 2:  # Step function (use for step increase or decrease in price)
350 |             assert len(
351 |                 row) == 8, 'generate_synthetic_prices.py ERROR: Incorrect number of parameters for step change in row ' + str(
352 |                 CropCount)
353 | 
354 |             Pi = float(row[3])
355 |             Pf = float(row[4])
356 |             t_step = float(row[5])
357 |             perturb = int(row[6])
358 |             if perturb == 1:
359 |                 s_p = float(row[7])
360 |             else:
361 |                 s_p = 0.0
362 | 
363 |             P = GeneratePrice_step(Nt, Pi, Pf, t_step, perturb, s_p)
364 | 
365 |         elif price_fxn_type == 3:  # Sinusoidal fluctuation in price
366 |             assert len(
367 |                 row) == 8, 'generate_synthetic_prices.py ERROR: Incorrect number of parameters for periodic price in row ' + str(
368 |                 CropCount)
369 | 
370 |             Pmag = float(row[3])
371 |             Pamp = float(row[4])
372 |             n_period = float(row[5])
373 |             perturb = int(row[6])
374 |             if perturb == 1:
375 |                 s_p = float(row[7])
376 |             else:
377 |                 s_p = 0.0
378 | 
379 |             P = GeneratePrice_periodic(Nt, Pmag, Pamp, n_period, perturb, s_p)
380 | 
381 |         if CropCount == 1:
382 |             P_allcrops = P
383 |         else:
384 |             P_allcrops = np.column_stack((P_allcrops, P))
385 | 
386 |     if CropCount != Nc:
387 |         print('\nERROR: Mismatch in number of crops read and provided as input\n')
388 |         print(str(Nc) + ' crops were expected, ' + str(CropCount) + ' were read. Check input\n')
389 |         sys.exit()
390 | 
391 |     fp.close()  # close the input CSV file before writing the output
392 | 
393 |     with open(CropFileOut, 'w') as fp_out:
394 |         np.savetxt(fp_out, np.asarray(crop_ids, dtype=np.int32).reshape((1, Nc)), delimiter=',', fmt='%d')
395 |         np.savetxt(fp_out, P_allcrops, delimiter=',', fmt='%.2f')
396 | 
397 | 
398 | 
399 | if __name__ == "__main__":
400 |     main(sys.argv)
401 | 
--------------------------------------------------------------------------------
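The three GeneratePrice_* functions in janus/preprocessing/generate_synthetic_prices.py can also be called directly from Python instead of going through the CSV-driven main() entry point. The sketch below is illustrative only: the crop IDs, parameter values, and output file name are placeholders and do not correspond to any data shipped with Janus.

    # Minimal sketch: build one synthetic price column per crop and write them in
    # the same layout main() produces (a header row of crop IDs, then the prices).
    import numpy as np

    from janus.preprocessing.generate_synthetic_prices import (
        GeneratePrice_linear,
        GeneratePrice_periodic,
        GeneratePrice_step,
    )

    Nt = 20  # number of time steps

    crop_a = GeneratePrice_linear(Nt, Pi=250.0, Pf=300.0, perturb=1, s_p=5.0)
    crop_b = GeneratePrice_step(Nt, Pi=180.0, Pf=220.0, t_step=0.5, perturb=0)
    crop_c = GeneratePrice_periodic(Nt, Pmag=150.0, Pamp=20.0, n_period=2, perturb=1, s_p=3.0)

    prices = np.column_stack((crop_a, crop_b, crop_c))  # shape (Nt, 3)

    with open('synthetic_prices.csv', 'w') as f_out:
        np.savetxt(f_out, np.array([[1, 2, 3]]), delimiter=',', fmt='%d')  # placeholder crop IDs
        np.savetxt(f_out, prices, delimiter=',', fmt='%.2f')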