├── .gitmodules ├── test ├── __init__.py ├── ubyte.nc ├── issue1152.nc ├── issue671.nc ├── issue672.nc ├── test_gold.nc ├── CRM032_test1.nc ├── netcdf_dummy_file.nc ├── 20171025_2056.Cloud_Top_Height.nc ├── test_issue908.py ├── test_ncrc.py ├── filter_availability.py ├── test_open_mem.py ├── test_refcount.py ├── test_create_mem.py ├── test_no_iter_contains.py ├── test_shape.py ├── run_all.py ├── test_unicode.py ├── test_filepath.py ├── test_unicodeatt.py ├── test_dap.py ├── test_grps2.py ├── test_cdf5.py ├── test_chunk_cache.py ├── test_get_fill_value.py ├── test_compression_szip.py ├── test_get_variables_by_attributes.py ├── test_scalarvar.py ├── test_masked5.py ├── test_multiple_open_close.py ├── test_Unsigned.py ├── test_unlimdim.py ├── test_compression_zstd.py ├── test_compression_bzip2.py ├── test_cdl.py ├── test_grps.py ├── test_compoundatt.py ├── test_diskless.py ├── test_vars.py ├── test_complex.py ├── test_enum.py ├── test_masked6.py ├── test_compression_blosc.py ├── test_masked2.py ├── test_masked4.py ├── test_types.py ├── test_stringarr.py ├── test_multifile2.py ├── test_rename.py ├── test_compression_quant.py ├── test_alignment.py ├── test_masked3.py ├── test_compoundvar.py ├── test_dims.py └── test_endian.py ├── src └── netCDF4 │ ├── py.typed │ ├── plugins │ └── empty.txt │ ├── _netCDF4.pyi │ └── __init__.py ├── external ├── README └── nc_complex │ └── include │ └── generated_fallbacks │ └── nc_complex_version.h ├── create_docs.sh ├── checkversion.py ├── include ├── no_parallel_support_imports.pxi.in ├── parallel_support_imports.pxi.in ├── mpi-compat.h └── membuf.pyx ├── README.htmldocs ├── .github ├── dependabot.yml ├── stubtest-allowlist └── workflows │ ├── miniconda.yml │ ├── build_master.yml │ ├── build_latest.yml │ ├── build_old.yml │ └── cibuildwheel.yml ├── examples ├── subset.py ├── mpi_example_compressed.py ├── json_att.py ├── README.md ├── mpi_example.py ├── bench.py ├── complex_numbers.py ├── threaded_read.py ├── test_stringarr.py ├── bench_compress.py ├── bench_compress4.py ├── bench_compress2.py ├── bench_compress3.py └── bench_diskless.py ├── MANIFEST.in ├── LICENSE ├── README.release ├── man ├── ncinfo.1 ├── nc4tonc3.1 └── nc3tonc4.1 ├── setup.cfg └── pyproject.toml /.gitmodules: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/netCDF4/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/netCDF4/plugins/empty.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/ubyte.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/ubyte.nc -------------------------------------------------------------------------------- /test/issue1152.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/issue1152.nc -------------------------------------------------------------------------------- /test/issue671.nc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/issue671.nc -------------------------------------------------------------------------------- /test/issue672.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/issue672.nc -------------------------------------------------------------------------------- /test/test_gold.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/test_gold.nc -------------------------------------------------------------------------------- /test/CRM032_test1.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/CRM032_test1.nc -------------------------------------------------------------------------------- /test/netcdf_dummy_file.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/netcdf_dummy_file.nc -------------------------------------------------------------------------------- /test/20171025_2056.Cloud_Top_Height.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Unidata/netcdf4-python/HEAD/test/20171025_2056.Cloud_Top_Height.nc -------------------------------------------------------------------------------- /external/README: -------------------------------------------------------------------------------- 1 | * 20240616: remove submodule, include v0.2.0 tag source files (https://github.com/PlasmaFAIR/nc-complex/releases/tag/v0.2.0). 
2 | -------------------------------------------------------------------------------- /create_docs.sh: -------------------------------------------------------------------------------- 1 | # use pdoc (https://pdoc3.github.io/pdoc/) to generate API docs 2 | pdoc3 --html --config show_source_code=False --force -o 'docs' netCDF4 3 | /bin/cp -f docs/netCDF4/index.html docs/index.html 4 | -------------------------------------------------------------------------------- /external/nc_complex/include/generated_fallbacks/nc_complex_version.h: -------------------------------------------------------------------------------- 1 | #define NC_COMPLEX_GIT_SHA1 "37310ed00f3910974bdefefcdfa4787588651f59" 2 | #define NC_COMPLEX_GIT_VERSION "v0.2.0" 3 | #define NC_COMPLEX_GIT_STATE "clean" 4 | #define NC_COMPLEX_GIT_DATE "2023-12-08" 5 | -------------------------------------------------------------------------------- /checkversion.py: -------------------------------------------------------------------------------- 1 | import netCDF4, numpy 2 | print('netcdf4-python version: %s'%netCDF4.__version__) 3 | print('HDF5 lib version: %s'%netCDF4.__hdf5libversion__) 4 | print('netcdf lib version: %s'%netCDF4.__netcdf4libversion__) 5 | print('numpy version %s' % numpy.__version__) 6 | -------------------------------------------------------------------------------- /include/no_parallel_support_imports.pxi.in: -------------------------------------------------------------------------------- 1 | # Stubs for when parallel support is not enabled 2 | 3 | ctypedef int MPI_Comm 4 | ctypedef int MPI_Info 5 | ctypedef int Comm 6 | ctypedef int Info 7 | cdef MPI_Comm MPI_COMM_WORLD 8 | cdef MPI_Info MPI_INFO_NULL 9 | MPI_COMM_WORLD = 0 10 | MPI_INFO_NULL = 0 11 | -------------------------------------------------------------------------------- /include/parallel_support_imports.pxi.in: -------------------------------------------------------------------------------- 1 | # Imports and typedefs required at compile time for enabling parallel support 2 | 3 | cimport mpi4py.MPI as MPI 4 | from mpi4py.libmpi cimport ( 5 | MPI_Comm, 6 | MPI_Info, 7 | MPI_Comm_dup, 8 | MPI_Info_dup, 9 | MPI_Comm_free, 10 | MPI_Info_free, 11 | MPI_INFO_NULL, 12 | MPI_COMM_WORLD, 13 | ) 14 | 15 | ctypedef MPI.Comm Comm 16 | ctypedef MPI.Info Info 17 | -------------------------------------------------------------------------------- /README.htmldocs: -------------------------------------------------------------------------------- 1 | To update web docs at http://github.unidata.io/netcdf4-python: 2 | 3 | First install pdoc (https://github.com/pdoc3/pdoc) 4 | 5 | Then in netcdf4-python github clone directory (after building and 6 | installing github master), generate docs by running create_docs.sh. 7 | 8 | Docs are put in docs/index.html. 9 | 10 | Github pages (https://unidata.github.io/netcdf4-python/) points to docs/index.html 11 | in master branch. 
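(For reference, create_docs.sh runs: pdoc3 --html --config show_source_code=False --force -o 'docs' netCDF4, and then copies docs/netCDF4/index.html to docs/index.html.)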
12 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # See https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot 2 | 3 | version: 2 4 | updates: 5 | 6 | - package-ecosystem: "github-actions" 7 | directory: "/" 8 | schedule: 9 | interval: "daily" 10 | labels: 11 | - "Bot" 12 | groups: 13 | github-actions: 14 | patterns: 15 | - '*' 16 | -------------------------------------------------------------------------------- /test/test_issue908.py: -------------------------------------------------------------------------------- 1 | import netCDF4, unittest 2 | import numpy as np 3 | import pathlib 4 | 5 | 6 | class Issue908TestCase(unittest.TestCase): 7 | 8 | def setUp(self): 9 | self.nc = netCDF4.Dataset(pathlib.Path(__file__).parent / "CRM032_test1.nc") 10 | 11 | def tearDown(self): 12 | self.nc.close() 13 | 14 | def runTest(self): 15 | data = self.nc['rgrid'][:] 16 | assert data.all() is np.ma.masked 17 | 18 | if __name__ == '__main__': 19 | unittest.main() 20 | -------------------------------------------------------------------------------- /test/test_ncrc.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import netCDF4 3 | from netCDF4 import __has_nc_rc_set__ 4 | 5 | class NCRCTestCase(unittest.TestCase): 6 | def setUp(self): 7 | pass 8 | 9 | def tearDown(self): 10 | pass 11 | 12 | def runTest(self): 13 | """test rc_get, rc_set functions""" 14 | if __has_nc_rc_set__: 15 | netCDF4.rc_set('foo','bar') 16 | assert netCDF4.rc_get('foo') == 'bar' 17 | assert netCDF4.rc_get('bar') == None 18 | 19 | if __name__ == '__main__': 20 | unittest.main() 21 | -------------------------------------------------------------------------------- /include/mpi-compat.h: -------------------------------------------------------------------------------- 1 | /* Author: Lisandro Dalcin */ 2 | /* Contact: dalcinl@gmail.com */ 3 | 4 | #ifndef MPI_COMPAT_H 5 | #define MPI_COMPAT_H 6 | 7 | #include "netcdf-compat.h" 8 | 9 | #if HAS_PARALLEL_SUPPORT 10 | 11 | #include 12 | 13 | #ifdef MSMPI_VER 14 | #define PyMPI_HAVE_MPI_Message 1 15 | #endif 16 | 17 | #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) 18 | typedef void *PyMPI_MPI_Message; 19 | #define MPI_Message PyMPI_MPI_Message 20 | #endif 21 | 22 | #if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session) 23 | typedef void *PyMPI_MPI_Session; 24 | #define MPI_Session PyMPI_MPI_Session 25 | #endif 26 | 27 | #endif /* HAS_PARALLEL_SUPPORT */ 28 | 29 | #endif/*MPI_COMPAT_H*/ 30 | -------------------------------------------------------------------------------- /examples/subset.py: -------------------------------------------------------------------------------- 1 | # use 'orthogonal indexing' feature to subselect data over CONUS. 2 | import netCDF4 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | 6 | # use real data from CFS reanalysis. 7 | # note: we're reading GRIB2 data! 
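# with orthogonal indexing, the boolean arrays built below (latselect, lonselect)
# are applied to each dimension independently, so the result is the full lat/lon
# box rather than numpy-style paired ("fancy") indexing.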
8 | URL="http://nomads.ncdc.noaa.gov/thredds/dodsC/modeldata/cmd_flxf/2010/201007/20100701/flxf00.gdas.2010070100.grb2" 9 | nc = netCDF4.Dataset(URL) 10 | lats = nc.variables['lat'][:]; lons = nc.variables['lon'][:] 11 | latselect = np.logical_and(lats>25,lats<50) 12 | lonselect = np.logical_and(lons>230,lons<305) 13 | data = nc.variables['Soil_moisture_content'][0,0,latselect,lonselect] 14 | plt.contourf(data[::-1]) # flip latitudes so they go south -> north 15 | plt.show() 16 | -------------------------------------------------------------------------------- /test/filter_availability.py: -------------------------------------------------------------------------------- 1 | from tempfile import NamedTemporaryFile 2 | from netCDF4 import ( 3 | Dataset, 4 | __has_zstandard_support__, 5 | __has_bzip2_support__, 6 | __has_blosc_support__, 7 | __has_szip_support__, 8 | ) 9 | import os 10 | 11 | # True if plugins have been disabled 12 | no_plugins = os.getenv("NO_PLUGINS") 13 | 14 | 15 | with NamedTemporaryFile(suffix=".nc", delete=False) as tf: 16 | with Dataset(tf.name, "w") as nc: 17 | has_zstd_filter = __has_zstandard_support__ and nc.has_zstd_filter() 18 | has_bzip2_filter = __has_bzip2_support__ and nc.has_bzip2_filter() 19 | has_blosc_filter = __has_blosc_support__ and nc.has_blosc_filter() 20 | has_szip_filter = __has_szip_support__ and nc.has_szip_filter() 21 | -------------------------------------------------------------------------------- /examples/mpi_example_compressed.py: -------------------------------------------------------------------------------- 1 | # to run: mpirun -np 4 python mpi_example_compressed.py 2 | import sys 3 | from mpi4py import MPI 4 | import numpy as np 5 | from netCDF4 import Dataset 6 | rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run) 7 | nc = Dataset('parallel_test_compressed.nc', 'w', parallel=True) 8 | d = nc.createDimension('dim',4) 9 | v = nc.createVariable('var', np.int32, 'dim', zlib=True) 10 | v[:] = np.arange(4) 11 | nc.close() 12 | # read compressed files in parallel, check the data, try to rewrite some data 13 | nc = Dataset('parallel_test_compressed.nc', 'a', parallel=True) 14 | v = nc['var'] 15 | assert rank==v[rank] 16 | v.set_collective(True) # issue #1108 (var must be in collective mode or write will fail) 17 | v[rank]=2*rank 18 | nc.close() 19 | -------------------------------------------------------------------------------- /examples/json_att.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset 2 | import json 3 | # example showing how python objects (lists, dicts, None, True) 4 | # can be serialized as strings, saved as netCDF attributes, 5 | # and then converted back to python objects using json. 
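# (netCDF attributes can only hold numbers and strings, so json.dumps flattens the
# python objects to strings on write, and json.loads in convert_json below turns
# them back into python objects on read.)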
6 | ds = Dataset('json.nc', 'w') 7 | ds.pythonatt1 = json.dumps(['foo', {'bar': ['baz', None, 1.0, 2]}]) 8 | ds.pythonatt2 = "true" # converted to bool 9 | ds.pythonatt3 = "null" # converted to None 10 | print(ds) 11 | ds.close() 12 | ds = Dataset('json.nc') 13 | def convert_json(s): 14 | try: 15 | a = json.loads(s) 16 | return a 17 | except: 18 | return s 19 | x = convert_json(ds.pythonatt1) 20 | print(type(x)) 21 | print(x) 22 | print(convert_json(ds.pythonatt2)) 23 | print(convert_json(ds.pythonatt3)) 24 | ds.close() 25 | -------------------------------------------------------------------------------- /test/test_open_mem.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | import netCDF4 4 | 5 | CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) 6 | 7 | 8 | class TestOpenMem(unittest.TestCase): 9 | def test_mem_open(self): 10 | fpath = os.path.join(CURRENT_DIR, "netcdf_dummy_file.nc") 11 | 12 | with open(fpath, 'rb') as f: 13 | nc_bytes = f.read() 14 | 15 | if not netCDF4.__has_nc_open_mem__: 16 | with self.assertRaises(ValueError): 17 | netCDF4.Dataset('foo_bar', memory=nc_bytes) 18 | return 19 | 20 | with netCDF4.Dataset('foo_bar', memory=nc_bytes) as nc: 21 | assert nc.filepath() == 'foo_bar' 22 | assert nc.project_summary == 'Dummy netCDF file' 23 | 24 | if __name__ == '__main__': 25 | unittest.main() 26 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include docs/index.html 2 | recursive-include man * 3 | recursive-include external * 4 | include MANIFEST.in 5 | include README.htmldocs 6 | include Changelog 7 | include setup.cfg 8 | include examples/*py 9 | include examples/README.md 10 | exclude examples/data 11 | include test/*py 12 | include test/*nc 13 | include src/netCDF4/__init__.py 14 | include src/netCDF4/_netCDF4.pyx 15 | exclude src/netCDF4/_netCDF4.c 16 | include src/netCDF4/utils.py 17 | include src/netCDF4/plugins/empty.txt 18 | include src/netCDF4/py.typed 19 | include src/netCDF4/*.pyi 20 | include include/netCDF4.pxi 21 | include include/mpi-compat.h 22 | include include/membuf.pyx 23 | include include/netcdf-compat.h 24 | include include/no_parallel_support_imports.pxi.in 25 | include include/parallel_support_imports.pxi.in 26 | include *.md 27 | include *.py 28 | include *.release 29 | include *.sh 30 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | * `tutorial.py`: code from introduction section of documentation. 2 | * `json_att.py`: shows to to use json to serialize python objects, save them as 3 | netcdf attributes, and then convert them back to python objects. 4 | * `subset.py`: shows how to use 'orthogonal indexing' to select geographic regions. 5 | * `reading_netcdf.ipynb`: ipython notebook from Unidata python workshop. 6 | * `writing_netcdf.ipynb`: ipython notebook from Unidata python workshop. 7 | * `threaded_read.py`: test script for concurrent threaded reads. 8 | * `bench.py`: benchmarks for reading/writing using different formats. 9 | * `bench_compress*.py``: benchmarks for reading/writing with compression. 10 | * `bench_diskless.py`: benchmarks for 'diskless' IO. 
11 | * `test_stringarr.py`: test utilities for converting arrays of fixed-length strings 12 | to arrays of characters (with an extra dimension), and vice-versa. 13 | Useful since netcdf does not have a datatype for fixed-length string arrays. 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2008 Jeffrey Whitaker 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /include/membuf.pyx: -------------------------------------------------------------------------------- 1 | # Creates a memoryview from a malloced C pointer, 2 | # which will be freed when the python object is garbage collected. 3 | # Code found here is derived from 4 | # http://stackoverflow.com/a/28166272/428751 5 | from cpython.buffer cimport PyBuffer_FillInfo 6 | from libc.stdlib cimport free 7 | 8 | # create a python memoryview object from a raw pointer. 9 | cdef memview_fromptr(void *memory, size_t size): 10 | cdef _MemBuf buf = _MemBuf() 11 | buf.memory = memory # malloced void pointer 12 | buf.size = size # size of pointer in bytes 13 | return memoryview(buf) 14 | 15 | # private extension type that implements buffer protocol. 16 | cdef class _MemBuf: 17 | cdef void *memory 18 | cdef size_t size 19 | def __getbuffer__(self, Py_buffer *buf, int flags): 20 | PyBuffer_FillInfo(buf, self, self.memory, self.size, 1, flags) 21 | def __releasebuffer__(self, Py_buffer *buf): 22 | # why doesn't this do anything?? 
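# (probably nothing is needed here: PyBuffer_FillInfo above stores a reference to
# this _MemBuf object in the exported buffer, so the malloced memory only has to
# be freed once, in __dealloc__, after all views are gone.)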
23 | pass 24 | def __dealloc__(self): 25 | free(self.memory) 26 | -------------------------------------------------------------------------------- /test/test_refcount.py: -------------------------------------------------------------------------------- 1 | import unittest, netCDF4, tempfile, os 2 | 3 | file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 4 | 5 | class RefCountTestCase(unittest.TestCase): 6 | 7 | def setUp(self): 8 | nc = netCDF4.Dataset(file_name, mode='w', keepweakref=True, format='NETCDF4') 9 | d = nc.createDimension('fred', 2000) 10 | v = nc.createVariable('frank','f',('fred',)) 11 | self.file = file_name 12 | self.nc = nc 13 | 14 | def tearDown(self): 15 | # Remove the temporary files 16 | os.remove(self.file) 17 | 18 | def runTest(self): 19 | """testing garbage collection (issue 218)""" 20 | # this should trigger garbage collection (__dealloc__ method) 21 | del self.nc 22 | # if __dealloc__ not called to close file, then this 23 | # will fail with "Permission denied" error (since you can't 24 | # open a file 'w' that is already open for writing). 25 | nc = netCDF4.Dataset(self.file, mode='w', format='NETCDF4') 26 | 27 | if __name__ == '__main__': 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /test/test_create_mem.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import netCDF4 3 | import numpy as np 4 | from numpy.testing import assert_array_equal 5 | 6 | 7 | @unittest.skipIf(not netCDF4.__has_nc_create_mem__, "missing `nc_create_mem`") 8 | class TestCreateMem(unittest.TestCase): 9 | def test_mem_create(self): 10 | def check_inmemory(format): 11 | # memory is 'advisory size' - not needed for NETCDF4/HDF5 12 | # but is used for NETCDF3. 
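# (passing memory= together with mode 'w' creates the file in memory rather than
# on disk; nc.close() below then returns the buffer holding the file contents.)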
13 | nc = netCDF4.Dataset('test.nc','w',memory=1028,format=format) 14 | d = nc.createDimension('x',None) 15 | v = nc.createVariable('v',np.int32,'x') 16 | data = np.arange(5) 17 | v[0:5] = data 18 | # retrieve memory buffer 19 | b = nc.close() 20 | # open a new file using this memory buffer 21 | nc2 = netCDF4.Dataset('test2.nc','r',memory=b) 22 | assert_array_equal(nc2['v'][:],data) 23 | nc2.close() 24 | check_inmemory('NETCDF3_CLASSIC') 25 | check_inmemory('NETCDF4_CLASSIC') 26 | 27 | if __name__ == '__main__': 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /.github/stubtest-allowlist: -------------------------------------------------------------------------------- 1 | netCDF4.RealTypeLiteral 2 | netCDF4.ComplexTypeLiteral 3 | netCDF4.NumericTypeLiteral 4 | netCDF4.CharTypeLiteral 5 | netCDF4.TypeLiteral 6 | netCDF4.NumPyRealType 7 | netCDF4.NumPyComplexType 8 | netCDF4.NumPyNumericType 9 | netCDF4.NetCDFUDTClass 10 | netCDF4.AccessMode 11 | netCDF4.CompressionLevel 12 | netCDF4.CompressionType 13 | netCDF4.DatatypeType 14 | netCDF4.DimensionsType 15 | netCDF4.DiskFormat 16 | netCDF4.EndianType 17 | netCDF4.Format 18 | netCDF4.QuantizeMode 19 | netCDF4.CalendarType 20 | netCDF4.DateTimeArray 21 | netCDF4.FiltersDict 22 | netCDF4.SzipInfo 23 | netCDF4.BloscInfo 24 | netCDF4.BoolInt 25 | netCDF4.VarT 26 | netCDF4.RealVarT 27 | netCDF4.ComplexVarT 28 | netCDF4.NumericVarT 29 | netCDF4.Dimension.__reduce_cython__ 30 | netCDF4.Dimension.__setstate_cython__ 31 | netCDF4.Variable.auto_complex 32 | netCDF4.Variable.__iter__ 33 | netCDF4._netCDF4.Dimension.__reduce_cython__ 34 | netCDF4._netCDF4.Dimension.__setstate_cython__ 35 | netCDF4._netCDF4.NC_DISKLESS 36 | netCDF4._netCDF4.NC_PERSIST 37 | netCDF4._netCDF4.Variable.auto_complex 38 | netCDF4._netCDF4.Variable.__iter__ 39 | netCDF4._netCDF4.__reduce_cython__ 40 | netCDF4._netCDF4.__setstate_cython__ 41 | netCDF4._netCDF4.__test__ 42 | netCDF4.utils -------------------------------------------------------------------------------- /test/test_no_iter_contains.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import unittest 4 | 5 | import netCDF4 6 | 7 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 8 | 9 | 10 | class TestNoIterNoContains(unittest.TestCase): 11 | def setUp(self) -> None: 12 | self.file = FILE_NAME 13 | with netCDF4.Dataset(self.file, "w") as dataset: 14 | # just create a simple variable 15 | dataset.createVariable("var1", int) 16 | 17 | def tearDown(self) -> None: 18 | os.remove(self.file) 19 | 20 | def test_no_iter(self) -> None: 21 | """Verify that iteration is explicitly not supported""" 22 | with netCDF4.Dataset(self.file, "r") as dataset: 23 | with self.assertRaises(TypeError): 24 | for _ in dataset: # type: ignore # type checker catches that this doesn't work 25 | pass 26 | 27 | def test_no_contains(self) -> None: 28 | """Verify the membership operations are explicity not supported""" 29 | with netCDF4.Dataset(self.file, "r") as dataset: 30 | with self.assertRaises(TypeError): 31 | _ = "var1" in dataset 32 | 33 | if __name__ == "__main__": 34 | unittest.main(verbosity=2) 35 | -------------------------------------------------------------------------------- /test/test_shape.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset 2 | import tempfile, unittest, os 3 | import numpy as np 4 | 5 | file_name = 
tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 6 | xdim=None; ydim=121; zdim=169 7 | datashape = (ydim,zdim) 8 | data = np.ones(datashape,dtype=np.float64) 9 | 10 | class ShapeTestCase(unittest.TestCase): 11 | 12 | def setUp(self): 13 | self.file = file_name 14 | f = Dataset(file_name,'w') 15 | f.createDimension('x',xdim) 16 | f.createDimension('y',ydim) 17 | f.createDimension('z',zdim) 18 | v = f.createVariable('data',np.float64,('x','y','z')) 19 | f.close() 20 | 21 | def tearDown(self): 22 | # Remove the temporary files 23 | os.remove(self.file) 24 | 25 | def runTest(self): 26 | """test for issue 90 (array shape should not be modified by 27 | assignment to netCDF variable)""" 28 | f = Dataset(self.file, 'a') 29 | v = f.variables['data'] 30 | v[0] = data 31 | # make sure shape of data array 32 | # is not changed by assigning it 33 | # to a netcdf var with one more dimension (issue 90) 34 | assert data.shape == datashape 35 | f.close() 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /test/run_all.py: -------------------------------------------------------------------------------- 1 | import glob, os, sys, unittest, struct, tempfile 2 | from netCDF4 import __hdf5libversion__,__netcdf4libversion__,__version__, Dataset 3 | # can also just run 4 | # python -m unittest discover . 'tst*py' 5 | 6 | # Find all test files. 7 | test_files = glob.glob('test_*.py') 8 | # run opendap test first (issue #856). 9 | test_files.remove('test_dap.py') 10 | test_files.insert(0,'test_dap.py') 11 | 12 | # Build the test suite from the tests found in the test files. 13 | testsuite = unittest.TestSuite() 14 | for f in test_files: 15 | m = __import__(os.path.splitext(f)[0]) 16 | testsuite.addTests(unittest.TestLoader().loadTestsFromModule(m)) 17 | 18 | if __name__ == '__main__': 19 | import numpy, cython 20 | sys.stdout.write('\n') 21 | sys.stdout.write('netcdf4-python version: %s\n' % __version__) 22 | sys.stdout.write('HDF5 lib version: %s\n' % __hdf5libversion__) 23 | sys.stdout.write('netcdf lib version: %s\n' % __netcdf4libversion__) 24 | sys.stdout.write('numpy version %s\n' % numpy.__version__) 25 | sys.stdout.write('cython version %s\n' % cython.__version__) 26 | runner = unittest.TextTestRunner(verbosity=(2 if "-v" in sys.argv else 1)) 27 | result = runner.run(testsuite) 28 | if not result.wasSuccessful(): 29 | sys.exit(1) 30 | -------------------------------------------------------------------------------- /test/test_unicode.py: -------------------------------------------------------------------------------- 1 | import netCDF4 2 | import numpy as np 3 | import sys, unittest, os, tempfile 4 | 5 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 6 | ATT1 = '\u03a0\u03a3\u03a9' 7 | ATT2 = 'x\xb0' 8 | ATT3 = ['\u03a0', '\u03a3', '\u03a9'] 9 | DIM_NAME = 'x\xb0' 10 | VAR_NAME = 'Andr\xe9' 11 | 12 | class UnicodeTestCase(unittest.TestCase): 13 | 14 | def setUp(self): 15 | self.file = FILE_NAME 16 | f = netCDF4.Dataset(self.file,'w') 17 | f.attribute1 = ATT1 18 | f.attribute2 = ATT2 19 | f.attribute3 = ATT3 20 | d = f.createDimension(DIM_NAME, None) 21 | v = f.createVariable(VAR_NAME, np.float64, (DIM_NAME,)) 22 | f.close() 23 | 24 | def tearDown(self): 25 | # Remove the temporary files 26 | os.remove(self.file) 27 | 28 | def runTest(self): 29 | """testing unicode""" 30 | f = netCDF4.Dataset(self.file, 'r') 31 | d = f.dimensions[DIM_NAME] 32 | v = f.variables[VAR_NAME] 33 | # 
check accessing individual attributes. 34 | assert f.attribute1 == ATT1 35 | assert f.attribute2 == ATT2 36 | #assert f.attribute3 == ''.join(ATT3) 37 | # behavior changed issue 770 38 | assert f.attribute3 == ATT3 39 | f.close() 40 | 41 | if __name__ == '__main__': 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /test/test_filepath.py: -------------------------------------------------------------------------------- 1 | import os, sys, shutil 2 | import tempfile 3 | import unittest 4 | import netCDF4 5 | import pathlib 6 | 7 | 8 | @unittest.skipIf(not netCDF4.__has_nc_inq_path__, "missing `nc_inq_path`") 9 | class test_filepath(unittest.TestCase): 10 | def setUp(self): 11 | self.netcdf_file = pathlib.Path(__file__).parent / "netcdf_dummy_file.nc" 12 | self.nc = netCDF4.Dataset(self.netcdf_file) 13 | 14 | def tearDown(self): 15 | self.nc.close() 16 | 17 | def test_filepath(self): 18 | assert self.nc.filepath() == str(self.netcdf_file) 19 | 20 | def test_filepath_with_non_ascii_characters(self): 21 | # create nc-file in a filepath using a cp1252 string 22 | tmpdir = tempfile.mkdtemp() 23 | filepath = os.path.join(tmpdir,b'Pl\xc3\xb6n.nc'.decode('cp1252')) 24 | nc = netCDF4.Dataset(filepath,'w',encoding='cp1252') 25 | filepatho = nc.filepath(encoding='cp1252') 26 | assert filepath == filepatho 27 | assert filepath.encode('cp1252') == filepatho.encode('cp1252') 28 | nc.close() 29 | shutil.rmtree(tmpdir) 30 | 31 | def test_no_such_file_raises(self): 32 | fname = 'not_a_nc_file.nc' 33 | with self.assertRaisesRegex(OSError, fname): 34 | netCDF4.Dataset(fname, 'r') 35 | 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /README.release: -------------------------------------------------------------------------------- 1 | * create a release branch ('vX.Y.Zrel'). In the release branch... 2 | * make sure version number in PKG-INFO, setup.py and netCDF4/_netCDF4.pyx are up to date 3 | (in _netCDF4.pyx, change 'Version' in first line of docstring at top of file, 4 | and __version__ variable). 5 | * update Changelog and README.md as needed. 6 | * commit and push all of the above changes. 7 | * install the module (python setup.py install), then run 'sh create_docs.sh' 8 | to update html docs. Commit and push the update to docs/netCDF4/index.html. 9 | * create a pull request for the release branch. 10 | * After release branch has been merged, tag a release 11 | git tag -a vX.Y.Zrel -m "version X.Y.Z release" 12 | git push origin --tags 13 | * push an empty commit to the netcdf4-python-wheels repo to trigger new builds. 14 | (e.g. git commit --allow-empty -m "Trigger build") 15 | You will likely want to edit the .travis.yml file at 16 | https://github.com/MacPython/netcdf4-python-wheels to specify the BUILD_COMMIT before triggering a build. 17 | * update the pypi entry, upload the wheels from wheels.scipy.org. 18 | Lastly, create a source tarball using 19 | 'python setup.py sdist' and upload to pypi. 20 | * update web docs by copying docs/netCDF4/index.html somewhere, switch 21 | to the gh-pages branch, copy the index.html file back, commit and push 22 | the updated index.html file (see README.gh-pages). 23 | -------------------------------------------------------------------------------- /src/netCDF4/_netCDF4.pyi: -------------------------------------------------------------------------------- 1 | # The definitions are intentionally done in the __init__. 
2 | # This file only exists in case someone imports from netCDF4._netCDF4 3 | from . import ( 4 | CompoundType, 5 | Dataset, 6 | Dimension, 7 | EnumType, 8 | Group, 9 | MFDataset, 10 | MFTime, 11 | NetCDF4MissingFeatureException, 12 | Variable, 13 | VLType, 14 | __has_blosc_support__, 15 | __has_bzip2_support__, 16 | __has_cdf5_format__, 17 | __has_nc_create_mem__, 18 | __has_nc_inq_format_extended__, 19 | __has_nc_inq_path__, 20 | __has_nc_open_mem__, 21 | __has_nc_rc_set__, 22 | __has_ncfilter__, 23 | __has_parallel4_support__, 24 | __has_parallel_support__, 25 | __has_pnetcdf_support__, 26 | __has_quantization_support__, 27 | __has_rename_grp__, 28 | __has_set_alignment__, 29 | __has_szip_support__, 30 | __has_zstandard_support__, 31 | __hdf5libversion__, 32 | __netcdf4libversion__, 33 | __version__, 34 | chartostring, 35 | date2index, 36 | date2num, 37 | default_encoding, 38 | default_fillvals, 39 | dtype_is_complex, 40 | get_alignment, 41 | get_chunk_cache, 42 | getlibversion, 43 | is_native_big, 44 | is_native_little, 45 | num2date, 46 | rc_get, 47 | rc_set, 48 | set_alignment, 49 | set_chunk_cache, 50 | stringtoarr, 51 | stringtochar, 52 | unicode_error, 53 | ) 54 | -------------------------------------------------------------------------------- /test/test_unicodeatt.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset 2 | import sys, unittest, os, tempfile 3 | 4 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 5 | 6 | class UnicodeAttTestCase(unittest.TestCase): 7 | 8 | def setUp(self): 9 | self.file = FILE_NAME 10 | nc = Dataset(self.file,'w') 11 | # write as a utf-8 string 12 | nc.stratt = b'\xe6\xb7\xb1\xe5\x85\xa5 Python'.decode('utf-8') 13 | # write as raw bytes (decoded string is same as above with 'big5' encoding) 14 | nc.stratt2 = b'\xb2`\xa4J Python' 15 | # same as above, but attribute forced to be of type NC_STRING 16 | nc.setncattr_string('stratt3',b'\xb2`\xa4J Python') 17 | nc.close() 18 | 19 | def tearDown(self): 20 | # Remove the temporary files 21 | os.remove(self.file) 22 | 23 | def runTest(self): 24 | """testing unicode attributes""" 25 | nc = Dataset(self.file, 'r') 26 | assert nc.stratt.encode('utf-8') == b'\xe6\xb7\xb1\xe5\x85\xa5 Python' 27 | stratt2 = nc.getncattr('stratt2',encoding='big5') # decodes using big5 28 | stratt3 = nc.getncattr('stratt3',encoding='big5') # same as above 29 | assert stratt2.encode('big5') == b'\xb2`\xa4J Python' 30 | assert nc.stratt == stratt2 # decoded strings are the same 31 | assert nc.stratt == stratt3 # decoded strings are the same 32 | nc.close() 33 | 34 | if __name__ == '__main__': 35 | unittest.main() 36 | -------------------------------------------------------------------------------- /man/ncinfo.1: -------------------------------------------------------------------------------- 1 | .\" (C) Copyright 2015, Ross Gammon , 2 | .\" 3 | .TH NCINFO 1 "22 Mar 2015" 4 | .\" 5 | .SH NAME 6 | ncinfo \- a program to print summary information about a netCDF file 7 | .SH SYNOPSIS 8 | .B ncinfo 9 | .RB [ \-h ] 10 | .RB [ \-g|\-\-group=\fIgrp\fR ] 11 | .RB [ \-v|\-\-variable=\fIvar\fR ] 12 | .RB [ \-d|\-\-dimension=\fIdim\fR ] 13 | .I filename 14 | .br 15 | .SH DESCRIPTION 16 | This manual page documents briefly the 17 | .B ncinfo 18 | command. 
19 | .PP 20 | \fBncinfo\fP is a program that prints summary information about a netCDF file 21 | .SH OPTIONS 22 | These programs follow the usual GNU command line syntax, with long 23 | options starting with two dashes (`-'). 24 | A summary of options is included below. 25 | .TP 26 | .B \-h 27 | Shows a summary of the available options. 28 | .TP 29 | .B \-g grp, \-\-group=grp 30 | Prints information for this group. The default group is the root group. Nested groups are specified using posix paths e.g. group1/group2/group3. 31 | .TP 32 | .B \-v , \-\-variable= 33 | Prints information for this variable. 34 | .TP 35 | .B \-d , \-\-dimension= 36 | Prints information for this dimension. 37 | .TP 38 | The filename of the netCDF file must be supplied as the last argument. 39 | .SH SEE ALSO 40 | .BR nc3tonc4 (1), 41 | .BR nc4tonc3 (1). 42 | .br 43 | .SH AUTHOR 44 | This manual page was written by Ross Gammon based on the options displayed by ncinfo \-h. 45 | -------------------------------------------------------------------------------- /test/test_dap.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import netCDF4 3 | import numpy as np 4 | from datetime import datetime, timedelta 5 | from numpy.testing import assert_array_almost_equal 6 | import os 7 | 8 | # test accessing data over http with opendap. 9 | 10 | yesterday = datetime.now() - timedelta(days=1) 11 | URL = f'http://nomads.ncep.noaa.gov/dods/gfs_1p00/gfs{yesterday:%Y%m%d}/gfs_1p00_00z' 12 | URL_https = 'https://www.neracoos.org/erddap/griddap/WW3_EastCoast_latest' 13 | varname = 'hgtsfc' 14 | data_min = -40; data_max = 5900 15 | varshape = (181, 360) 16 | 17 | 18 | @unittest.skipIf(os.getenv("NO_NET"), "network tests disabled") 19 | class DapTestCase(unittest.TestCase): 20 | def setUp(self): 21 | pass 22 | 23 | def tearDown(self): 24 | pass 25 | 26 | def runTest(self): 27 | """testing access of data over http using opendap""" 28 | ncfile = netCDF4.Dataset(URL) 29 | assert varname in ncfile.variables.keys() 30 | var = ncfile.variables[varname] 31 | data = var[0,...] 32 | assert data.shape == varshape 33 | assert np.abs(data.min()-data_min) < 10 34 | assert np.abs(data.max()-data_max) < 100 35 | ncfile.close() 36 | # test https support (linked curl lib must built with openssl support) 37 | ncfile = netCDF4.Dataset(URL_https) 38 | assert ncfile['hs'].long_name=='Significant Wave Height' 39 | ncfile.close() 40 | 41 | if __name__ == '__main__': 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /test/test_grps2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | import netCDF4 6 | 7 | # test implicit group creation by using unix-like paths 8 | # in createVariable and createGroups (added in 1.1.8). 9 | # also test Dataset.__getitem__, also added in 1.1.8. 
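# e.g. createVariable('/grouped/data/v', ...) creates the intermediate groups
# 'grouped' and 'data' on the fly if they don't exist yet, and f['/grouped/data/v']
# retrieves the variable again by the same unix-like path.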
10 | 11 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 12 | 13 | class Groups2TestCase(unittest.TestCase): 14 | 15 | def setUp(self): 16 | self.file = FILE_NAME 17 | f = netCDF4.Dataset(self.file,'w') 18 | x = f.createDimension('x',10) 19 | # create groups in path if they don't already exist 20 | v = f.createVariable('/grouped/data/v',float,('x',)) 21 | g = f.groups['grouped'] 22 | # create groups underneath 'grouped' 23 | v2 = g.createVariable('./data/data2/v2',float,('x',)) 24 | f.close() 25 | 26 | def tearDown(self): 27 | # Remove the temporary files 28 | os.remove(self.file) 29 | 30 | def runTest(self): 31 | """testing implicit group and creation and Dataset.__getitem__""" 32 | f = netCDF4.Dataset(self.file, 'r') 33 | v1 = f['/grouped/data/v'] 34 | v2 = ((f.groups['grouped']).groups['data']).variables['v'] 35 | g = f['/grouped/data'] 36 | v3 = g['data2/v2'] 37 | assert v1 == v2 38 | assert g == f.groups['grouped'].groups['data'] 39 | assert v3.name == 'v2' 40 | f.close() 41 | 42 | if __name__ == '__main__': 43 | unittest.main() 44 | -------------------------------------------------------------------------------- /man/nc4tonc3.1: -------------------------------------------------------------------------------- 1 | .\" (C) Copyright 2015, Ross Gammon , 2 | .\" 3 | .TH NC4TONC3 1 "22 Mar 2015" 4 | .\" 5 | .SH NAME 6 | nc4tonc3 \- a program to convert a classic netCDF 4 file to netCDF 3 format 7 | .SH SYNOPSIS 8 | .B nc4tonc3 9 | .RB [ \-h ] 10 | .RB [ \-o ] 11 | .RB [ \-\-chunk ] 12 | .I netcdf4filename 13 | .I netcdf3filename 14 | .br 15 | .SH DESCRIPTION 16 | This manual page documents briefly the 17 | .B nc4tonc3 18 | command. 19 | .PP 20 | \fBnc4tonc3\fP is a program that converts a netCDF 4 file (in NETCDF4_CLASSIC format) to netCDF 3 format. 21 | .SH OPTIONS 22 | These programs follow the usual GNU command line syntax, with long 23 | options starting with two dashes (`-'). 24 | A summary of options is included below. 25 | .TP 26 | .B \-h 27 | Shows a summary of the available options. 28 | .TP 29 | .B \-o 30 | Overwrite destination file (default is to raise an error if output file already exists). 31 | .TP 32 | .B \-\-quiet=(0|1) 33 | If set to 1, don't print any diagnostic information. 34 | .TP 35 | .B \-\-format 36 | Choose the netcdf3 format to use. NETCDF3_64BIT is used by default, or it can be set to NETCDF3_CLASSIC. 37 | .TP 38 | .B \-\-chunk=(integer) 39 | The number of records along unlimited dimension to write at once. The default is 10. It is ignored if there is no unlimited dimension. If chunk=0, this means write all the data at once. 40 | .SH SEE ALSO 41 | .BR ncinfo (1), 42 | .BR nc3tonc4 (1). 43 | .br 44 | .SH AUTHOR 45 | This manual page was written by Ross Gammon based on the options displayed by nc3tonc4 \-h. 
46 | -------------------------------------------------------------------------------- /test/test_cdf5.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset, __has_cdf5_format__ 2 | import numpy as np 3 | import sys, os, unittest, tempfile 4 | import struct 5 | from numpy.testing import assert_array_equal 6 | 7 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 8 | dimsize = np.iinfo(np.int32).max*2 # only allowed in CDF5 9 | ndim = 100 10 | arrdata = np.random.randint(np.iinfo(np.uint8).min,np.iinfo(np.uint8).max,size=ndim) 11 | 12 | 13 | @unittest.skipIf(not __has_cdf5_format__ or struct.calcsize("P") < 8, "no CDF5 support") 14 | class test_cdf5(unittest.TestCase): 15 | def setUp(self): 16 | self.netcdf_file = FILE_NAME 17 | nc = Dataset(self.netcdf_file,'w',format='NETCDF3_64BIT_DATA') 18 | # create a 64-bit dimension 19 | d = nc.createDimension('dim',dimsize) # 64-bit dimension 20 | # create an 8-bit unsigned integer variable 21 | v = nc.createVariable('var',np.uint8,'dim') 22 | v[:ndim] = arrdata 23 | # create a 64-bit integer attribute (issue #878) 24 | nc.setncattr('int64_attr', np.int64(-9223372036854775806)) 25 | nc.close() 26 | 27 | def tearDown(self): 28 | # Remove the temporary files 29 | os.remove(self.netcdf_file) 30 | 31 | def runTest(self): 32 | """testing NETCDF3_64BIT_DATA format (CDF-5)""" 33 | f = Dataset(self.netcdf_file, 'r') 34 | assert f.dimensions['dim'].size == dimsize 35 | assert_array_equal(arrdata, f.variables['var'][:ndim]) 36 | assert (type(f.int64_attr) == np.int64) 37 | f.close() 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /test/test_chunk_cache.py: -------------------------------------------------------------------------------- 1 | import unittest, netCDF4, tempfile, os 2 | 3 | file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 4 | cache_size = 10000 5 | cache_nelems = 100 6 | cache_preempt = 0.5 7 | cache_size2 = 20000 8 | cache_nelems2 = 200 9 | cache_preempt2 = 1.0 10 | 11 | class RefCountTestCase(unittest.TestCase): 12 | 13 | def setUp(self): 14 | nc = netCDF4.Dataset(file_name, mode='w', format='NETCDF4') 15 | d = nc.createDimension('fred', 2000) 16 | # can only change cache size in createVariable (not nelems or preemption) 17 | # this change lasts only as long as file is open. 18 | v = nc.createVariable('frank','f',('fred',),chunk_cache=15000) 19 | size, nelems, preempt = v.get_var_chunk_cache() 20 | assert size==15000 21 | self.file=file_name 22 | nc.close() 23 | 24 | def tearDown(self): 25 | # Remove the temporary files 26 | os.remove(self.file) 27 | 28 | def runTest(self): 29 | """testing methods for accessing and changing chunk cache""" 30 | # change cache parameters before opening fil. 31 | netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt) 32 | nc = netCDF4.Dataset(self.file, mode='r') 33 | # check to see that chunk cache parameters were changed. 
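# (get_chunk_cache() and get_var_chunk_cache() both return a (size, nelems,
# preemption) tuple, so they can be compared directly with the values set above.)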
34 | assert netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt) 35 | # change cache parameters for variable, check 36 | nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2) 37 | assert nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2) 38 | nc.close() 39 | 40 | if __name__ == '__main__': 41 | unittest.main() 42 | -------------------------------------------------------------------------------- /src/netCDF4/__init__.py: -------------------------------------------------------------------------------- 1 | # init for netCDF4. package 2 | # if HDF5_PLUGIN_PATH not set, point to package path if plugins live there 3 | import os 4 | pluginpath = os.path.join(__path__[0],'plugins') 5 | if 'HDF5_PLUGIN_PATH' not in os.environ and\ 6 | (os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.so')) or\ 7 | os.path.exists(os.path.join(pluginpath,'__nczhdf5filters.dll')) or\ 8 | os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.dylib'))): 9 | os.environ['HDF5_PLUGIN_PATH']=pluginpath 10 | # Docstring comes from extension module _netCDF4. 11 | from ._netCDF4 import * 12 | # Need explicit imports for names beginning with underscores 13 | from ._netCDF4 import __doc__ 14 | from ._netCDF4 import (__version__, __netcdf4libversion__, __hdf5libversion__, 15 | __has_rename_grp__, __has_nc_inq_path__, 16 | __has_nc_inq_format_extended__, __has_nc_open_mem__, 17 | __has_nc_create_mem__, __has_cdf5_format__, 18 | __has_parallel4_support__, __has_pnetcdf_support__, 19 | __has_quantization_support__, __has_zstandard_support__, 20 | __has_bzip2_support__, __has_blosc_support__, __has_szip_support__, 21 | __has_set_alignment__, __has_parallel_support__, __has_ncfilter__, __has_nc_rc_set__) 22 | __all__ = [ 23 | 'Dataset', 'Variable', 'Dimension', 'Group', 'MFDataset', 'MFTime', 'CompoundType', 24 | 'VLType', 'date2num', 'num2date', 'date2index', 'stringtochar', 'chartostring', 25 | 'stringtoarr', 'getlibversion', 'EnumType', 'get_chunk_cache', 'set_chunk_cache', 26 | 'set_alignment', 'get_alignment', 'rc_get', 'rc_set', 27 | ] 28 | __pdoc__ = {'utils': False} 29 | -------------------------------------------------------------------------------- /examples/mpi_example.py: -------------------------------------------------------------------------------- 1 | # to run: mpirun -np 4 python mpi_example.py 2 | import sys 3 | from mpi4py import MPI 4 | import numpy as np 5 | from netCDF4 import Dataset 6 | 7 | 8 | nc_format = 'NETCDF4_CLASSIC' if len(sys.argv) < 2 else sys.argv[1] 9 | 10 | rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run) 11 | if rank == 0: 12 | print('Creating file with format {}'.format(nc_format)) 13 | nc = Dataset( 14 | "parallel_test.nc", 15 | "w", 16 | parallel=True, 17 | comm=MPI.COMM_WORLD, 18 | info=MPI.Info(), 19 | format=nc_format, # type: ignore # we'll assume it's OK 20 | ) 21 | # below should work also - MPI_COMM_WORLD and MPI_INFO_NULL will be used. 22 | #nc = Dataset('parallel_test.nc', 'w', parallel=True) 23 | d = nc.createDimension('dim',4) 24 | v = nc.createVariable('var', np.int32, 'dim') 25 | v[rank] = rank 26 | 27 | # switch to collective mode, rewrite the data. 
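# (in collective mode every rank participates in each read/write call; the default
# independent mode lets each rank do I/O on its own. Some operations, e.g. writes
# to compressed variables (issue #1108), require collective mode.)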
28 | v.set_collective(True) 29 | v[rank] = rank 30 | nc.close() 31 | 32 | # reopen the file read-only, check the data 33 | nc = Dataset('parallel_test.nc', parallel=True, comm=MPI.COMM_WORLD, 34 | info=MPI.Info()) 35 | assert rank==nc['var'][rank] 36 | nc.close() 37 | 38 | # reopen the file in append mode, modify the data on the last rank. 39 | nc = Dataset('parallel_test.nc', 'a',parallel=True, comm=MPI.COMM_WORLD, 40 | info=MPI.Info()) 41 | if rank == 3: v[rank] = 2*rank 42 | nc.close() 43 | 44 | # reopen the file read-only again, check the data. 45 | # leave out the comm and info kwargs to check that the defaults 46 | # (MPI_COMM_WORLD and MPI_INFO_NULL) work. 47 | nc = Dataset('parallel_test.nc', parallel=True) 48 | if rank == 3: 49 | assert 2*rank==nc['var'][rank] 50 | else: 51 | assert rank==nc['var'][rank] 52 | nc.close() 53 | -------------------------------------------------------------------------------- /test/test_get_fill_value.py: -------------------------------------------------------------------------------- 1 | import unittest, os, tempfile 2 | import netCDF4 3 | from numpy.testing import assert_array_equal 4 | import numpy as np 5 | 6 | fill_val = np.array(9.9e31) 7 | 8 | # test Variable.get_fill_value 9 | 10 | class TestGetFillValue(unittest.TestCase): 11 | def setUp(self): 12 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 13 | f = netCDF4.Dataset(self.testfile, 'w') 14 | dim = f.createDimension('x',10) 15 | for dt in netCDF4.default_fillvals.keys(): 16 | if not dt.startswith('c'): 17 | v = f.createVariable(dt+'_var',dt,dim) 18 | v = f.createVariable('float_var',np.float64,dim,fill_value=fill_val) 19 | # test fill_value='default' option (issue #1374) 20 | v2 = f.createVariable('float_var2',np.float64,dim,fill_value='default') 21 | f.close() 22 | 23 | def tearDown(self): 24 | os.remove(self.testfile) 25 | 26 | def runTest(self): 27 | f = netCDF4.Dataset(self.testfile, "r") 28 | # no _FillValue set, test that default fill value returned 29 | for dt in netCDF4.default_fillvals.keys(): 30 | if not dt.startswith('c'): 31 | fillval = np.array(netCDF4.default_fillvals[dt]) 32 | if dt == 'S1': fillval = fillval.astype(dt) 33 | v = f[dt+'_var'] 34 | assert_array_equal(fillval, v.get_fill_value()) 35 | # _FillValue attribute is set. 
36 | v = f['float_var'] 37 | assert_array_equal(fill_val, v.get_fill_value()) 38 | v = f['float_var2'] 39 | assert_array_equal(np.array(netCDF4.default_fillvals['f8']), v._FillValue) 40 | f.close() 41 | 42 | if __name__ == '__main__': 43 | unittest.main() 44 | -------------------------------------------------------------------------------- /test/test_compression_szip.py: -------------------------------------------------------------------------------- 1 | from numpy.random.mtrand import uniform 2 | from netCDF4 import Dataset 3 | from numpy.testing import assert_almost_equal 4 | import os, tempfile, unittest, sys 5 | from filter_availability import has_szip_filter 6 | 7 | ndim = 100000 8 | filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 9 | datarr = uniform(size=(ndim,)) 10 | 11 | def write_netcdf(filename,dtype='f8'): 12 | nc = Dataset(filename,'w') 13 | nc.createDimension('n', ndim) 14 | foo = nc.createVariable('data',\ 15 | dtype,('n'),compression=None) 16 | foo_szip = nc.createVariable('data_szip',\ 17 | dtype,('n'),compression='szip',szip_coding='ec',szip_pixels_per_block=32) 18 | foo[:] = datarr 19 | foo_szip[:] = datarr 20 | nc.close() 21 | 22 | 23 | @unittest.skipIf(not has_szip_filter, "szip filter not available") 24 | class CompressionTestCase(unittest.TestCase): 25 | def setUp(self): 26 | self.filename = filename 27 | write_netcdf(self.filename) 28 | 29 | def tearDown(self): 30 | # Remove the temporary files 31 | os.remove(self.filename) 32 | 33 | def runTest(self): 34 | f = Dataset(self.filename) 35 | assert_almost_equal(datarr,f.variables['data'][:]) 36 | assert f.variables['data'].filters() ==\ 37 | {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} 38 | assert_almost_equal(datarr,f.variables['data_szip'][:]) 39 | dtest = {'zlib': False, 'szip': {'coding': 'ec', 'pixels_per_block': 32}, 'zstd': False, 'bzip2': False, 'blosc': False, 'shuffle': False, 'complevel': 0, 'fletcher32': False} 40 | assert f.variables['data_szip'].filters() == dtest 41 | f.close() 42 | 43 | 44 | if __name__ == '__main__': 45 | unittest.main() 46 | -------------------------------------------------------------------------------- /examples/bench.py: -------------------------------------------------------------------------------- 1 | # benchmark reads and writes, with and without compression. 2 | # tests all four supported file formats. 3 | from typing import TYPE_CHECKING, Any 4 | from numpy.random.mtrand import uniform 5 | import netCDF4 6 | from timeit import Timer 7 | import os, sys 8 | if TYPE_CHECKING: 9 | from netCDF4 import Format as NCFormat 10 | else: 11 | NCFormat = Any 12 | 13 | # create an n1dim by n2dim by n3dim random array. 
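# (the array actually has four dimensions, n1dim x n2dim x n3dim x n4dim,
# i.e. 30 x 15 x 73 x 144, matching the four dimensions created below.)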
14 | n1dim = 30 15 | n2dim = 15 16 | n3dim = 73 17 | n4dim = 144 18 | ntrials = 10 19 | sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim)) 20 | array = uniform(size=(n1dim,n2dim,n3dim,n4dim)) 21 | 22 | def write_netcdf(filename,zlib=False,least_significant_digit=None,format: NCFormat='NETCDF4'): 23 | file = netCDF4.Dataset(filename,'w',format=format) 24 | file.createDimension('n1', n1dim) 25 | file.createDimension('n2', n2dim) 26 | file.createDimension('n3', n3dim) 27 | file.createDimension('n4', n4dim) 28 | foo = file.createVariable('data', 'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=least_significant_digit) 29 | foo[:] = array 30 | file.close() 31 | 32 | def read_netcdf(filename): 33 | file = netCDF4.Dataset(filename) 34 | data = file.variables['data'][:] 35 | file.close() 36 | 37 | for format in ['NETCDF3_CLASSIC','NETCDF3_64BIT','NETCDF4_CLASSIC','NETCDF4']: 38 | sys.stdout.write('testing file format %s ...\n' % format) 39 | # writing, no compression. 40 | t = Timer("write_netcdf('test1.nc',format='%s')" % format,"from __main__ import write_netcdf") 41 | sys.stdout.write('writing took %s seconds\n' %\ 42 | repr(sum(t.repeat(ntrials,1))/ntrials)) 43 | # test reading. 44 | t = Timer("read_netcdf('test1.nc')","from __main__ import read_netcdf") 45 | sys.stdout.write('reading took %s seconds\n' % 46 | repr(sum(t.repeat(ntrials,1))/ntrials)) 47 | -------------------------------------------------------------------------------- /examples/complex_numbers.py: -------------------------------------------------------------------------------- 1 | import netCDF4 2 | import numpy as np 3 | 4 | complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j], dtype="c16") 5 | np_dt = np.dtype([("r", np.float64), ("i", np.float64)]) 6 | complex_struct_array = np.array( 7 | [(r, i) for r, i in zip(complex_array.real, complex_array.imag)], 8 | dtype=np_dt, 9 | ) 10 | 11 | print("\n**********") 12 | print("Reading a file that uses a dimension for complex numbers") 13 | filename = "complex_numbers_as_dimension.nc" 14 | 15 | with netCDF4.Dataset(filename, "w") as f: 16 | f.createDimension("x", size=len(complex_array)) 17 | f.createDimension("complex", size=2) 18 | c_ri = f.createVariable("data_dim", np.float64, ("x", "complex")) 19 | as_dim_array = np.vstack((complex_array.real, complex_array.imag)).T 20 | c_ri[:] = as_dim_array 21 | 22 | with netCDF4.Dataset(filename, "r", auto_complex=True) as f: 23 | print(f["data_dim"]) 24 | 25 | 26 | print("\n**********") 27 | print("Reading a file that uses a compound datatype for complex numbers") 28 | filename = "complex_numbers_as_datatype.nc" 29 | 30 | with netCDF4.Dataset(filename, "w") as f: 31 | f.createDimension("x", size=len(complex_array)) 32 | nc_dt = f.createCompoundType(np_dt, "nc_complex") 33 | breakpoint() 34 | 35 | c_struct = f.createVariable("data_struct", nc_dt, ("x",)) 36 | c_struct[:] = complex_struct_array 37 | 38 | with netCDF4.Dataset(filename, "r", auto_complex=True) as f: 39 | print(f["data_struct"]) 40 | 41 | print("\n**********") 42 | print("Writing complex numbers to a file") 43 | filename = "writing_complex_numbers.nc" 44 | with netCDF4.Dataset(filename, "w", auto_complex=True) as f: 45 | f.createDimension("x", size=len(complex_array)) 46 | c_var = f.createVariable("data", np.complex128, ("x",)) 47 | c_var[:] = complex_array 48 | print(c_var) 49 | 50 | with netCDF4.Dataset(filename, "r", auto_complex=True) as f: 51 | print(f["data"]) 52 | 
-------------------------------------------------------------------------------- /test/test_get_variables_by_attributes.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | import netCDF4 5 | 6 | class VariablesByAttributesTests(unittest.TestCase): 7 | 8 | def setUp(self): 9 | netcdf_file = os.path.join(os.path.dirname(__file__), "netcdf_dummy_file.nc") 10 | self.nc = netCDF4.Dataset(netcdf_file) 11 | 12 | def test_find_variables_by_single_attribute(self): 13 | vs = self.nc.get_variables_by_attributes(axis='Z') 14 | self.assertEqual(len(vs), 1) 15 | 16 | vs = self.nc.get_variables_by_attributes(units='m/s') 17 | self.assertEqual(len(vs), 4) 18 | 19 | def test_find_variables_by_multiple_attribute(self): 20 | vs = self.nc.get_variables_by_attributes(axis='Z', units='m') 21 | self.assertEqual(len(vs), 1) 22 | 23 | def test_find_variables_by_single_lambda(self): 24 | vs = self.nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T']) 25 | self.assertEqual(len(vs), 1) 26 | 27 | vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None) 28 | self.assertEqual(len(vs), 12) 29 | 30 | def test_find_variables_by_multiple_lambdas(self): 31 | vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, 32 | long_name=lambda v: v is not None and 'Upward (w) velocity' in v) 33 | self.assertEqual(len(vs), 1) 34 | 35 | def test_find_variables_by_attribute_and_lambda(self): 36 | vs = self.nc.get_variables_by_attributes(units='m/s', 37 | grid_mapping=lambda v: v is not None) 38 | self.assertEqual(len(vs), 4) 39 | 40 | vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, 41 | long_name='Upward (w) velocity') 42 | self.assertEqual(len(vs), 1) 43 | 44 | if __name__ == '__main__': 45 | unittest.main() 46 | -------------------------------------------------------------------------------- /examples/threaded_read.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset 2 | from numpy.testing import assert_array_equal, assert_array_almost_equal 3 | import numpy as np 4 | import threading 5 | import queue 6 | import time 7 | 8 | # demonstrate reading of different files from different threads. 9 | # Releasing the Global Interpreter Lock (GIL) when calling the 10 | # netcdf C library for read operations speeds up the reads 11 | # when threads are used (issue 369). 12 | # Test script contributed by Ryan May of Unidata. 
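# With the GIL released during the C-library reads, the threaded pass below should
# finish noticeably faster than the serial pass on a multi-core machine.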
13 | 14 | # Make some files 15 | nfiles = 4 16 | fnames = []; datal = [] 17 | for i in range(nfiles): 18 | fname = 'test%d.nc' % i 19 | fnames.append(fname) 20 | nc = Dataset(fname, 'w') 21 | data = np.random.randn(500, 500, 500) 22 | datal.append(data) 23 | nc.createDimension('x', 500) 24 | nc.createDimension('y', 500) 25 | nc.createDimension('z', 500) 26 | var = nc.createVariable('grid', 'f', ('x', 'y', 'z')) 27 | var[:] = data 28 | nc.close() 29 | 30 | # Queue them up 31 | items: queue.Queue = queue.Queue() 32 | for data,fname in zip(datal,fnames): 33 | items.put(fname) 34 | 35 | # Function for threads to use 36 | def get_data(serial=None): 37 | if serial is None: # if not called from a thread 38 | fname = items.get() 39 | else: 40 | fname = fnames[serial] 41 | nc = Dataset(fname, 'r') 42 | data2 = nc.variables['grid'][:] 43 | # make sure the data is correct 44 | #assert_array_almost_equal(data2,datal[int(fname[4])]) 45 | nc.close() 46 | if serial is None: 47 | items.task_done() 48 | 49 | # Time it (no threading). 50 | start = time.time() 51 | for i in range(nfiles): 52 | get_data(serial=i) 53 | end = time.time() 54 | print('no threads, time = ',end - start) 55 | 56 | # with threading. 57 | start = time.time() 58 | for i in range(nfiles): 59 | threading.Thread(target=get_data).start() 60 | items.join() 61 | end = time.time() 62 | print('with threading, time = ',end - start) 63 | -------------------------------------------------------------------------------- /test/test_scalarvar.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | from numpy.testing import assert_almost_equal 6 | import netCDF4 7 | import math 8 | 9 | VAR_NAME='temp' 10 | VAR_TYPE='f4' 11 | VAR_VAL=math.pi 12 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 13 | GROUP_NAME = 'subgroup' 14 | 15 | # test scalar variable creation and retrieval. 16 | 17 | class ScalarVariableTestCase(unittest.TestCase): 18 | 19 | def setUp(self): 20 | self.file = FILE_NAME 21 | rootgrp = netCDF4.Dataset(self.file, 'w') 22 | # scalar variable. 23 | temp = rootgrp.createVariable(VAR_NAME,VAR_TYPE) 24 | #temp[:] = VAR_VAL 25 | temp.assignValue(VAR_VAL) 26 | subgroup = rootgrp.createGroup(GROUP_NAME) 27 | tempg = subgroup.createVariable(VAR_NAME,VAR_TYPE) 28 | tempg[:] = VAR_VAL 29 | #tempg.assignValue(VAR_VAL) 30 | rootgrp.close() 31 | 32 | def tearDown(self): 33 | # Remove the temporary file 34 | os.remove(self.file) 35 | 36 | def runTest(self): 37 | """testing scalar variables""" 38 | # check dimensions in root group. 
39 | f = netCDF4.Dataset(self.file, 'r+') 40 | v = f.variables[VAR_NAME] 41 | # dimensions and shape should be empty tuples 42 | self.assertTrue(v.dimensions == ()) 43 | self.assertTrue(v.shape == ()) 44 | # check result of getValue and slice 45 | assert_almost_equal(v.getValue(), VAR_VAL, decimal=6) 46 | assert_almost_equal(v[:], VAR_VAL, decimal=6) 47 | g = f.groups[GROUP_NAME] 48 | vg = g.variables[VAR_NAME] 49 | # dimensions and shape should be empty tuples 50 | self.assertTrue(vg.dimensions == ()) 51 | self.assertTrue(vg.shape == ()) 52 | # check result of getValue and slice 53 | assert_almost_equal(vg.getValue(), VAR_VAL, decimal=6) 54 | assert_almost_equal(vg[:], VAR_VAL, decimal=6) 55 | f.close() 56 | 57 | if __name__ == '__main__': 58 | unittest.main() 59 | -------------------------------------------------------------------------------- /test/test_masked5.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import tempfile 4 | 5 | import numpy as np 6 | from numpy import ma 7 | from numpy.testing import assert_array_equal 8 | from netCDF4 import Dataset, __netcdf4libversion__ 9 | 10 | # Test use of vector of missing values. 11 | 12 | class VectorMissingValues(unittest.TestCase): 13 | 14 | def setUp(self): 15 | 16 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 17 | 18 | self.missing_values = [-999,999,0] 19 | self.v = np.array([-999,0,1,2,3,999], dtype = "i2") 20 | self.v_ma = ma.array([-1,0,1,2,3,4], dtype = "i2", \ 21 | mask = [True, True, False, False, False, True]) 22 | 23 | f = Dataset(self.testfile, 'w') 24 | d = f.createDimension('x',6) 25 | v = f.createVariable('v', "i2", 'x') 26 | # issue 730: set fill_value for vlen str vars 27 | v2 = f.createVariable('v2', str, 'x', fill_value='') 28 | 29 | v.missing_value = self.missing_values 30 | v[:] = self.v 31 | v2[0]='first' 32 | 33 | f.close() 34 | 35 | 36 | def tearDown(self): 37 | 38 | os.remove(self.testfile) 39 | 40 | 41 | def test_scaled(self): 42 | 43 | """Testing auto-conversion of masked arrays""" 44 | 45 | f = Dataset(self.testfile) 46 | v = f.variables["v"] 47 | v2 = f.variables["v2"] 48 | self.assertTrue(isinstance(v[:], ma.masked_array)) 49 | assert_array_equal(v[:], self.v_ma) 50 | assert_array_equal(v[2],self.v[2]) # issue #624. 51 | v.set_auto_mask(False) 52 | self.assertTrue(isinstance(v[:], np.ndarray)) 53 | assert_array_equal(v[:], self.v) 54 | 55 | # issue 730 56 | # this part fails with netcdf 4.1.3 57 | # a bug in vlen strings? 58 | if __netcdf4libversion__ >= '4.4.0': 59 | assert v2[0] == 'first' 60 | assert v2[1] == '' 61 | 62 | 63 | f.close() 64 | 65 | 66 | if __name__ == '__main__': 67 | unittest.main() 68 | -------------------------------------------------------------------------------- /examples/test_stringarr.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset, stringtochar, chartostring 2 | import random, numpy 3 | from typing import Final 4 | 5 | # test utilities for converting arrays of fixed-length strings 6 | # to arrays of characters (with an extra dimension), and vice-versa. 7 | 8 | # netCDF does not have a fixed-length string data-type (only characters 9 | # and variable length strings). The convenience function chartostring 10 | # converts an array of characters to an array of fixed-length strings. 
11 | # The array of fixed length strings has one less dimension, and the 12 | # length of the strings is equal to the rightmost dimension of the 13 | # array of characters. The convenience function stringtochar goes 14 | # the other way, converting an array of fixed-length strings to an 15 | # array of characters with an extra dimension (the number of characters 16 | # per string) appended on the right. 17 | 18 | 19 | FILE_NAME = 'tst_stringarr.nc' 20 | FILE_FORMAT: Final = 'NETCDF4_CLASSIC' 21 | chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' 22 | 23 | nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT) 24 | n2 = 10; nchar = 12; nrecs = 4 25 | nc.createDimension('n1',None) 26 | nc.createDimension('n2',n2) 27 | nc.createDimension('nchar',nchar) 28 | v = nc.createVariable('strings','S1',('n1','n2','nchar')) 29 | for nrec in range(nrecs): 30 | data = numpy.empty((n2,),'S'+repr(nchar)) 31 | # fill data with random nchar character strings 32 | for n in range(n2): 33 | data[n] = ''.join([random.choice(chars) for i in range(nchar)]) 34 | print(nrec,data) 35 | # convert data to array of characters with an extra dimension 36 | # (the number of characters per string) added to the right. 37 | datac = stringtochar(data) 38 | v[nrec] = datac 39 | nc.close() 40 | 41 | nc = Dataset(FILE_NAME) 42 | v = nc.variables['strings'] 43 | print(v.shape, v.dtype) 44 | for nrec in range(nrecs): 45 | # read character array back, convert to an array of strings 46 | # of length equal to the rightmost dimension. 47 | print(nrec, chartostring(v[nrec])) 48 | nc.close() 49 | -------------------------------------------------------------------------------- /test/test_multiple_open_close.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tracemalloc 3 | import unittest 4 | 5 | import netCDF4 6 | 7 | 8 | @unittest.skipUnless( 9 | os.getenv("MEMORY_LEAK_TEST"), "computationally intensive test not enabled" 10 | ) 11 | class MultipleVariablesByAttributesCallsTests(unittest.TestCase): 12 | def test_multiple_calls(self): 13 | netcdf_file = os.path.join(os.path.dirname(__file__), "netcdf_dummy_file.nc") 14 | tracemalloc.start() 15 | snapshot = tracemalloc.take_snapshot() 16 | 17 | k_times = 10 18 | for _k in range(k_times): 19 | nc = netCDF4.Dataset(netcdf_file) 20 | 21 | vs = nc.get_variables_by_attributes(axis='Z') 22 | self.assertEqual(len(vs), 1) 23 | 24 | vs = nc.get_variables_by_attributes(units='m/s') 25 | self.assertEqual(len(vs), 4) 26 | 27 | vs = nc.get_variables_by_attributes(axis='Z', units='m') 28 | self.assertEqual(len(vs), 1) 29 | 30 | vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T']) 31 | self.assertEqual(len(vs), 1) 32 | 33 | vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None) 34 | self.assertEqual(len(vs), 12) 35 | 36 | vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, long_name=lambda v: v is not None and 'Upward (w) velocity' in v) 37 | self.assertEqual(len(vs), 1) 38 | 39 | vs = nc.get_variables_by_attributes(units='m/s', grid_mapping=lambda v: v is not None) 40 | self.assertEqual(len(vs), 4) 41 | 42 | vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, long_name='Upward (w) velocity') 43 | self.assertEqual(len(vs), 1) 44 | nc.close() 45 | stats = tracemalloc.take_snapshot().compare_to(snapshot, 'filename') 46 | tracemalloc.stop() 47 | print("[ Top 10 differences ]") 48 | for stat in stats[:10]: 49 | print(stat) 50 | 51 | if __name__ == 
'__main__': 52 | unittest.main() 53 | -------------------------------------------------------------------------------- /test/test_Unsigned.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import netCDF4 3 | from numpy.testing import assert_array_equal 4 | import numpy as np 5 | import pathlib 6 | 7 | test_dir = pathlib.Path(__file__).parent 8 | 9 | 10 | class Test_Unsigned(unittest.TestCase): 11 | """ 12 | Test autoconversion to unsigned ints when _Unsigned attribute is True. 13 | This attribute is set by netcdf-java to designate unsigned 14 | integer data stored with a signed integer type in netcdf-3. 15 | If _Unsigned=True, a view to the data as unsigned integers is returned. 16 | set_auto_scale can be used to turn this off (default is on) 17 | See issue #656 (pull request #658). 18 | """ 19 | def test_unsigned(self): 20 | with netCDF4.Dataset(test_dir / "ubyte.nc") as f: 21 | data = f['ub'][:] 22 | assert data.dtype.str[1:] == 'u1' 23 | assert_array_equal(data,np.array([0,255],np.uint8)) 24 | f.set_auto_scale(False) 25 | data2 = f['ub'][:] 26 | assert data2.dtype.str[1:] == 'i1' 27 | assert_array_equal(data2,np.array([0,-1],np.int8)) 28 | data = f['sb'][:] 29 | assert data.dtype.str[1:] == 'i1' 30 | # issue #1232 _Unsigned='false' is same as not having _Unsigned set. 31 | data = f['sb2'][:] 32 | assert data.dtype.str[1:] == 'i1' 33 | 34 | # issue 671 35 | with netCDF4.Dataset(test_dir / "issue671.nc") as f: 36 | data1 = f['soil_moisture'][:] 37 | assert np.ma.isMA(data1) 38 | f.set_auto_scale(False) 39 | data2 = f['soil_moisture'][:] 40 | assert data1.mask.sum() == data2.mask.sum() 41 | 42 | # issue 794 43 | # test that valid_min/valid_max/_FillValue are 44 | # treated as unsigned integers. 45 | with netCDF4.Dataset(test_dir / "20171025_2056.Cloud_Top_Height.nc") as f: 46 | data = f['HT'][:] 47 | assert data.mask.sum() == 57432 48 | assert int(data.max()) == 15430 49 | assert int(data.min()) == 0 50 | assert data.dtype == np.float32 51 | 52 | 53 | if __name__ == '__main__': 54 | unittest.main() 55 | -------------------------------------------------------------------------------- /examples/bench_compress.py: -------------------------------------------------------------------------------- 1 | # benchmark reads and writes, with and without compression. 2 | # tests all four supported file formats. 3 | from typing import TYPE_CHECKING, Any 4 | from numpy.random.mtrand import uniform 5 | import netCDF4 6 | import netCDF4.utils 7 | from timeit import Timer 8 | import os, sys 9 | if TYPE_CHECKING: 10 | from netCDF4 import CompressionLevel 11 | else: 12 | CompressionLevel = Any 13 | 14 | # create an n1dim by n2dim by n3dim random array.
15 | n1dim = 30 16 | n2dim = 15 17 | n3dim = 73 18 | n4dim = 144 19 | ntrials = 10 20 | sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim)) 21 | sys.stdout.write('(average of %s trials)\n' % ntrials) 22 | array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4) 23 | 24 | def write_netcdf(filename,zlib=False,shuffle=False,complevel: CompressionLevel = 6): 25 | file = netCDF4.Dataset(filename,'w',format='NETCDF4') 26 | file.createDimension('n1', n1dim) 27 | file.createDimension('n2', n2dim) 28 | file.createDimension('n3', n3dim) 29 | file.createDimension('n4', n4dim) 30 | foo = file.createVariable('data',\ 31 | 'f8',('n1','n2','n3','n4'),zlib=zlib,shuffle=shuffle,complevel=complevel) 32 | foo[:] = array 33 | file.close() 34 | 35 | def read_netcdf(filename): 36 | file = netCDF4.Dataset(filename) 37 | data = file.variables['data'][:] 38 | file.close() 39 | 40 | for compress_kwargs in ["zlib=False,shuffle=False","zlib=True,shuffle=False", 41 | "zlib=True,shuffle=True","zlib=True,shuffle=True,complevel=2"]: 42 | sys.stdout.write('testing compression %s...\n' % repr(compress_kwargs)) 43 | # writing. 44 | t = Timer("write_netcdf('test.nc',%s)" % compress_kwargs,"from __main__ import write_netcdf") 45 | sys.stdout.write('writing took %s seconds\n' %\ 46 | repr(sum(t.repeat(ntrials,1))/ntrials)) 47 | # test reading. 48 | t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") 49 | sys.stdout.write('reading took %s seconds\n' % 50 | repr(sum(t.repeat(ntrials,1))/ntrials)) 51 | # print out size of resulting files. 52 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 53 | -------------------------------------------------------------------------------- /examples/bench_compress4.py: -------------------------------------------------------------------------------- 1 | # benchmark reads and writes, with and without compression. 2 | # tests all four supported file formats. 3 | from typing import Literal 4 | from numpy.random.mtrand import uniform 5 | import netCDF4 6 | from timeit import Timer 7 | import os, sys 8 | 9 | # use real data. 10 | URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc" 11 | nc = netCDF4.Dataset(URL) 12 | 13 | # use real 500 hPa geopotential height data. 
14 | n1dim = 100 15 | n3dim = 73 16 | n4dim = 144 17 | ntrials = 10 18 | sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim)) 19 | sys.stdout.write('(average of %s trials)\n\n' % ntrials) 20 | array = nc.variables['hgt'][0:n1dim,5,:,:] 21 | 22 | 23 | def write_netcdf( 24 | filename, 25 | nsd, 26 | quantize_mode: Literal["BitGroom", "BitRound", "GranularBitRound"] = "BitGroom" 27 | ): 28 | file = netCDF4.Dataset(filename,'w',format='NETCDF4') 29 | file.createDimension('n1', None) 30 | file.createDimension('n3', n3dim) 31 | file.createDimension('n4', n4dim) 32 | foo = file.createVariable('data',\ 33 | 'f4',('n1','n3','n4'),\ 34 | zlib=True,shuffle=True,\ 35 | quantize_mode=quantize_mode,\ 36 | significant_digits=nsd) 37 | foo[:] = array 38 | file.close() 39 | 40 | def read_netcdf(filename): 41 | file = netCDF4.Dataset(filename) 42 | data = file.variables['data'][:] 43 | file.close() 44 | 45 | for sigdigits in range(1,5,1): 46 | sys.stdout.write('testing compression with significant_digits=%s...\n' %\ 47 | sigdigits) 48 | write_netcdf('test.nc',sigdigits) 49 | read_netcdf('test.nc') 50 | # print out size of resulting files with standard quantization. 51 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 52 | sys.stdout.write("testing compression with significant_digits=%s and 'GranularBitRound'...\n" %\ 53 | sigdigits) 54 | write_netcdf('test.nc',sigdigits,quantize_mode='GranularBitRound') 55 | read_netcdf('test.nc') 56 | # print out size of resulting files with alternate quantization. 57 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 58 | -------------------------------------------------------------------------------- /test/test_unlimdim.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | import numpy as np 6 | from numpy.random.mtrand import uniform 7 | from numpy.testing import assert_array_equal, assert_array_almost_equal 8 | import netCDF4 9 | 10 | # test creating variables with unlimited dimensions, 11 | # writing to and retrieving data from such variables. 12 | 13 | # create an n1dim by n2dim by n3dim random array 14 | n1dim = 4 15 | n2dim = 10 16 | n3dim = 8 17 | ranarr = 100.*uniform(size=(n1dim,n2dim,n3dim)) 18 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 19 | 20 | class UnlimdimTestCase(unittest.TestCase): 21 | 22 | def setUp(self): 23 | self.file = FILE_NAME 24 | f = netCDF4.Dataset(self.file, 'w') 25 | # foo has a single unlimited dimension 26 | f.createDimension('n1', n1dim) 27 | f.createDimension('n2', None) 28 | f.createDimension('n3', n3dim) 29 | foo = f.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3')) 30 | # write some data to it. 31 | #foo[:,0:n2dim,:] = ranarr 32 | foo[:] = ranarr 33 | foo[:,n2dim:,:] = 2.*ranarr 34 | # bar has 2 unlimited dimensions 35 | f.createDimension('n4', None) 36 | f.createDimension('n5', n2dim) 37 | f.createDimension('n6', None) 38 | # write some data to it. 39 | bar = f.createVariable('data2', ranarr.dtype.str[1:], ('n4','n5','n6')) 40 | # bar[0:n1dim,:, 0:n3dim] = ranarr 41 | bar[0:n1dim,:, 0:n3dim] = 2.0 42 | f.close() 43 | 44 | def tearDown(self): 45 | # Remove the temporary files 46 | os.remove(self.file) 47 | 48 | def runTest(self): 49 | """testing unlimited dimensions""" 50 | f = netCDF4.Dataset(self.file, 'r') 51 | foo = f.variables['data1'] 52 | # check shape. 
53 | self.assertTrue(foo.shape == (n1dim,2*n2dim,n3dim)) 54 | # check data. 55 | assert_array_almost_equal(foo[:,0:n2dim,:], ranarr) 56 | assert_array_almost_equal(foo[:,n2dim:2*n2dim,:], 2.*ranarr) 57 | bar = f.variables['data2'] 58 | # check shape. 59 | self.assertTrue(bar.shape == (n1dim,n2dim,n3dim)) 60 | # check data. 61 | #assert_array_almost_equal(bar[:,:,:], ranarr) 62 | assert_array_almost_equal(bar[:,:,:], 2.*np.ones((n1dim,n2dim,n3dim),ranarr.dtype)) 63 | f.close() 64 | 65 | if __name__ == '__main__': 66 | unittest.main() 67 | -------------------------------------------------------------------------------- /test/test_compression_zstd.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Any 2 | from numpy.random.mtrand import uniform 3 | from netCDF4 import Dataset 4 | from numpy.testing import assert_almost_equal 5 | import os, tempfile, unittest, sys 6 | from filter_availability import no_plugins, has_zstd_filter 7 | if TYPE_CHECKING: 8 | from netCDF4 import CompressionLevel 9 | else: 10 | CompressionLevel = Any 11 | 12 | ndim = 100000 13 | filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 14 | filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 15 | array = uniform(size=(ndim,)) 16 | 17 | def write_netcdf(filename,dtype='f8',complevel: CompressionLevel = 6): 18 | nc = Dataset(filename,'w') 19 | nc.createDimension('n', ndim) 20 | foo = nc.createVariable('data',\ 21 | dtype,('n'),compression='zstd',complevel=complevel) 22 | foo[:] = array 23 | nc.close() 24 | 25 | 26 | @unittest.skipIf(no_plugins or not has_zstd_filter, "zstd filter not available") 27 | class CompressionTestCase(unittest.TestCase): 28 | def setUp(self): 29 | self.filename1 = filename1 30 | self.filename2 = filename2 31 | write_netcdf(self.filename1,complevel=0) # no compression 32 | write_netcdf(self.filename2,complevel=4) # with compression 33 | 34 | def tearDown(self): 35 | # Remove the temporary files 36 | os.remove(self.filename1) 37 | os.remove(self.filename2) 38 | 39 | def runTest(self): 40 | uncompressed_size = os.stat(self.filename1).st_size 41 | # check uncompressed data 42 | f = Dataset(self.filename1) 43 | size = os.stat(self.filename1).st_size 44 | assert_almost_equal(array,f.variables['data'][:]) 45 | assert f.variables['data'].filters() ==\ 46 | {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} 47 | assert_almost_equal(size,uncompressed_size) 48 | f.close() 49 | # check compressed data. 
50 | f = Dataset(self.filename2) 51 | size = os.stat(self.filename2).st_size 52 | assert_almost_equal(array,f.variables['data'][:]) 53 | assert f.variables['data'].filters() ==\ 54 | {'zlib':False,'szip':False,'zstd':True,'bzip2':False,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False} 55 | assert size < 0.96*uncompressed_size 56 | f.close() 57 | 58 | if __name__ == '__main__': 59 | unittest.main() 60 | -------------------------------------------------------------------------------- /test/test_compression_bzip2.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Any 2 | from numpy.random.mtrand import uniform 3 | from netCDF4 import Dataset 4 | from numpy.testing import assert_almost_equal 5 | import os, tempfile, unittest, sys 6 | from filter_availability import no_plugins, has_bzip2_filter 7 | if TYPE_CHECKING: 8 | from netCDF4 import CompressionLevel 9 | else: 10 | CompressionLevel = Any 11 | 12 | ndim = 100000 13 | filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 14 | filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 15 | array = uniform(size=(ndim,)) 16 | 17 | def write_netcdf(filename,dtype='f8',complevel: CompressionLevel = 6): 18 | nc = Dataset(filename,'w') 19 | nc.createDimension('n', ndim) 20 | foo = nc.createVariable('data',\ 21 | dtype,('n'),compression='bzip2',complevel=complevel) 22 | foo[:] = array 23 | nc.close() 24 | 25 | 26 | @unittest.skipIf(no_plugins or not has_bzip2_filter, "bzip2 filter not available") 27 | class CompressionTestCase(unittest.TestCase): 28 | def setUp(self): 29 | self.filename1 = filename1 30 | self.filename2 = filename2 31 | write_netcdf(self.filename1,complevel=0) # no compression 32 | write_netcdf(self.filename2,complevel=4) # with compression 33 | 34 | def tearDown(self): 35 | # Remove the temporary files 36 | os.remove(self.filename1) 37 | os.remove(self.filename2) 38 | 39 | def runTest(self): 40 | uncompressed_size = os.stat(self.filename1).st_size 41 | # check uncompressed data 42 | f = Dataset(self.filename1) 43 | size = os.stat(self.filename1).st_size 44 | assert_almost_equal(array,f.variables['data'][:]) 45 | assert f.variables['data'].filters() ==\ 46 | {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} 47 | assert_almost_equal(size,uncompressed_size) 48 | f.close() 49 | # check compressed data. 50 | f = Dataset(self.filename2) 51 | size = os.stat(self.filename2).st_size 52 | assert_almost_equal(array,f.variables['data'][:]) 53 | assert f.variables['data'].filters() ==\ 54 | {'zlib':False,'szip':False,'zstd':False,'bzip2':True,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False} 55 | assert size < 0.96*uncompressed_size 56 | f.close() 57 | 58 | 59 | if __name__ == '__main__': 60 | unittest.main() 61 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # Rename this file to setup.cfg to set build options. 2 | # Follow instructions below for editing. 3 | [options] 4 | # if true, the nc-config script (installed with netcdf 4.1.2 and higher) 5 | # will be used to determine the locations of required libraries. 6 | # Usually, nothing else is needed. 7 | use_ncconfig=True 8 | # path to nc-config script (use if not found in unix PATH). 
9 | #ncconfig=/usr/local/bin/nc-config 10 | [directories] 11 | # 12 | # If nc-config doesn't do the trick, you can specify the locations 13 | # of the libraries and headers manually below 14 | # 15 | # uncomment and set to netCDF install location. 16 | # Include files should be located in netCDF4_dir/include and 17 | # the library should be located in netCDF4_dir/lib. 18 | # If the libraries and include files are installed in separate locations, 19 | # use netCDF4_libdir and netCDF4_incdir to specify the locations 20 | # separately. 21 | #netCDF4_dir = /usr/local 22 | # uncomment and set to HDF5 install location. 23 | # Include files should be located in HDF5_dir/include and 24 | # the library should be located in HDF5_dir/lib. 25 | # If the libraries and include files are installed in separate locations, 26 | # use HDF5_libdir and HDF5_incdir to specify the locations 27 | # separately. 28 | #HDF5_dir = /usr/local 29 | # if HDF5 was built with szip support as a static lib, 30 | # uncomment and set to szip lib install location. 31 | # If the libraries and include files are installed in separate locations, 32 | # use szip_libdir and szip_incdir. 33 | #szip_dir = /usr/local 34 | # if netcdf lib was built statically with HDF4 support, 35 | # uncomment and set to hdf4 lib (libmfhdf and libdf) install location. 36 | # If the libraries and include files are installed in separate locations, 37 | # use hdf4_libdir and hdf4_incdir. 38 | #hdf4_dir = /usr/local 39 | # if netcdf lib was built statically with HDF4 support, 40 | # uncomment and set to jpeg lib install location (hdf4 needs jpeg). 41 | # If the libraries and include files are installed in separate locations, 42 | # use jpeg_libdir and jpeg_incdir. 43 | #jpeg_dir = /usr/local 44 | # if netcdf lib was built statically with OpenDAP support, 45 | # uncomment and set to curl lib install location. 46 | # If the libraries and include files are installed in separate locations, 47 | # use curl_libdir and curl_incdir.
48 | #curl_dir = /usr/local 49 | # location of mpi.h (needed for parallel support) 50 | #mpi_incdir=/opt/local/include/mpich-mp 51 | [check-manifest] 52 | ignore = 53 | .gitignore 54 | README.gh-pages 55 | README.release 56 | examples/data/*nc 57 | examples/*ipynb 58 | -------------------------------------------------------------------------------- /test/test_cdl.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import netCDF4 3 | import os 4 | import pathlib 5 | 6 | test_ncdump="""netcdf ubyte { 7 | dimensions: 8 | d = 2 ; 9 | variables: 10 | byte ub(d) ; 11 | ub:_Unsigned = "true" ; 12 | byte sb(d) ; 13 | byte sb2(d) ; 14 | sb2:_Unsigned = "false" ; 15 | 16 | // global attributes: 17 | :_Format = "classic" ; 18 | } 19 | """ 20 | test_ncdump2="""netcdf ubyte { 21 | dimensions: 22 | d = 2 ; 23 | variables: 24 | byte ub(d) ; 25 | ub:_Unsigned = "true" ; 26 | byte sb(d) ; 27 | byte sb2(d) ; 28 | sb2:_Unsigned = "false" ; 29 | 30 | // global attributes: 31 | :_Format = "classic" ; 32 | data: 33 | 34 | ub = 0, -1 ; 35 | 36 | sb = -128, 127 ; 37 | 38 | sb2 = -127, -127 ; 39 | } 40 | """ 41 | 42 | 43 | ubyte_filename = pathlib.Path(__file__).parent / "ubyte.nc" 44 | 45 | 46 | @unittest.skipIf(os.getenv("NO_CDL"), "CDL test disabled") 47 | class Test_CDL(unittest.TestCase): 48 | """ 49 | Test import/export of CDL 50 | """ 51 | 52 | def setUp(self): 53 | with netCDF4.Dataset(ubyte_filename) as f: 54 | f.tocdl(outfile="ubyte.cdl", data=True) 55 | 56 | def test_tocdl(self): 57 | # treated as unsigned integers. 58 | with netCDF4.Dataset(ubyte_filename) as f: 59 | assert f.tocdl() == test_ncdump 60 | assert f.tocdl(data=True) == test_ncdump2 61 | 62 | def test_fromcdl(self): 63 | with netCDF4.Dataset.fromcdl("ubyte.cdl", ncfilename="ubyte2.nc") as f1: 64 | with netCDF4.Dataset(ubyte_filename) as f2: 65 | assert f1.variables.keys() == f2.variables.keys() 66 | assert f1.filepath() == "ubyte2.nc" 67 | assert f1.dimensions.keys() == f2.dimensions.keys() 68 | assert len(f1.dimensions["d"]) == len(f2.dimensions["d"]) 69 | assert (f1["ub"][:] == f2["ub"][:]).all() 70 | assert (f1["sb"][:] == f2["sb"][:]).all() 71 | 72 | # test if os.PathLike works 73 | with netCDF4.Dataset.fromcdl(pathlib.Path("ubyte.cdl"), ncfilename=pathlib.Path("ubyte3.nc")) as f3: 74 | assert f1.variables.keys() == f3.variables.keys() 75 | 76 | # check if correct errors are raised 77 | self.assertRaises(FileNotFoundError, netCDF4.Dataset.fromcdl, "doesnotexist.cdl") 78 | self.assertRaises(FileExistsError, netCDF4.Dataset.fromcdl, "ubyte.cdl", ncfilename="ubyte2.nc") 79 | 80 | # cleanup 81 | os.remove("ubyte2.nc") 82 | os.remove("ubyte3.nc") 83 | 84 | def tearDown(self): 85 | # Remove the temporary files 86 | os.remove('ubyte.cdl') 87 | 88 | if __name__ == '__main__': 89 | unittest.main() 90 | -------------------------------------------------------------------------------- /examples/bench_compress2.py: -------------------------------------------------------------------------------- 1 | # benchmark reads and writes, with and without compression. 2 | # tests all four supported file formats. 3 | from numpy.random.mtrand import uniform 4 | import netCDF4 5 | from timeit import Timer 6 | import os, sys 7 | 8 | # create an n1dim by n2dim by n3dim random array. 
9 | n1dim = 30 10 | n2dim = 15 11 | n3dim = 73 12 | n4dim = 144 13 | ntrials = 10 14 | sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim)) 15 | sys.stdout.write('(average of %s trials)\n\n' % ntrials) 16 | array = uniform(size=(n1dim,n2dim,n3dim,n4dim)) 17 | 18 | 19 | def write_netcdf(filename,complevel,lsd): 20 | file = netCDF4.Dataset(filename,'w',format='NETCDF4') 21 | file.createDimension('n1', n1dim) 22 | file.createDimension('n2', n2dim) 23 | file.createDimension('n3', n3dim) 24 | file.createDimension('n4', n4dim) 25 | foo = file.createVariable('data',\ 26 | 'f8',('n1','n2','n3','n4'),\ 27 | zlib=True,shuffle=True,complevel=complevel,\ 28 | least_significant_digit=lsd) 29 | foo[:] = array 30 | file.close() 31 | 32 | def read_netcdf(filename): 33 | file = netCDF4.Dataset(filename) 34 | data = file.variables['data'][:] 35 | file.close() 36 | 37 | lsd = None 38 | sys.stdout.write('using least_significant_digit %s\n\n' % lsd) 39 | for complevel in range(0,10,2): 40 | sys.stdout.write('testing compression with complevel %s...\n' % complevel) 41 | # writing. 42 | t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") 43 | sys.stdout.write('writing took %s seconds\n' %\ 44 | repr(sum(t.repeat(ntrials,1))/ntrials)) 45 | # test reading. 46 | t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") 47 | sys.stdout.write('reading took %s seconds\n' % 48 | repr(sum(t.repeat(ntrials,1))/ntrials)) 49 | # print out size of resulting files. 50 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 51 | 52 | complevel = 4 53 | sys.stdout.write('\nusing complevel %s\n\n' % complevel) 54 | for lsd in range(1,6): 55 | sys.stdout.write('testing compression with least_significant_digit %s...\n' % lsd) 56 | # writing. 57 | t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") 58 | sys.stdout.write('writing took %s seconds\n' %\ 59 | repr(sum(t.repeat(ntrials,1))/ntrials)) 60 | # test reading. 61 | t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") 62 | sys.stdout.write('reading took %s seconds\n' % 63 | repr(sum(t.repeat(ntrials,1))/ntrials)) 64 | # print out size of resulting files. 65 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 66 | -------------------------------------------------------------------------------- /test/test_grps.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | import netCDF4 6 | 7 | # test group creation. 
8 | 9 | FILE_NAME1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 10 | FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 11 | DYNASTY = "Tudor" 12 | HENRY_VII = "Henry_VII" 13 | MARGARET = "Margaret" 14 | JAMES_V_OF_SCOTLAND = "James_V_of_Scotland" 15 | MARY_I_OF_SCOTLAND = "Mary_I_of_Scotland" 16 | JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND = "James_VI_of_Scotland_and_I_of_England" 17 | names = [HENRY_VII,MARGARET,JAMES_V_OF_SCOTLAND,MARY_I_OF_SCOTLAND,JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND] 18 | root = '/' 19 | TREE1 = [root] 20 | for n in range(1,len(names)+1): 21 | path = [] 22 | for name in names[0:n]: 23 | path.append(root+name) 24 | TREE1.append(''.join(path)) 25 | TREE2 = [root,root+DYNASTY] 26 | for name in names: 27 | TREE2.append(root+DYNASTY+root+name) 28 | TREE2.sort() 29 | 30 | 31 | # python generator to walk the Group tree. 32 | def walktree(top): 33 | yield top.groups.values() 34 | for value in top.groups.values(): 35 | yield from walktree(value) 36 | 37 | class GroupsTestCase(unittest.TestCase): 38 | 39 | def setUp(self): 40 | self.file1 = FILE_NAME1 41 | f = netCDF4.Dataset(self.file1, 'w') 42 | g1 = f.createGroup(HENRY_VII) 43 | g2 = g1.createGroup(MARGARET) 44 | g3 = g2.createGroup(JAMES_V_OF_SCOTLAND) 45 | g4 = g3.createGroup(MARY_I_OF_SCOTLAND) 46 | g5 = g4.createGroup(JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND) 47 | f.close() 48 | self.file2 = FILE_NAME2 49 | f = netCDF4.Dataset(self.file2, 'w') 50 | g1 = netCDF4.Group(f,DYNASTY) 51 | g2 = g1.createGroup(HENRY_VII) 52 | g3 = g1.createGroup(MARGARET) 53 | g4 = g1.createGroup(JAMES_V_OF_SCOTLAND) 54 | g5 = g1.createGroup(MARY_I_OF_SCOTLAND) 55 | g6 = g1.createGroup(JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND) 56 | f.close() 57 | 58 | def tearDown(self): 59 | # Remove the temporary files 60 | os.remove(self.file1) 61 | os.remove(self.file2) 62 | 63 | def runTest(self): 64 | """testing groups""" 65 | f = netCDF4.Dataset(self.file1, 'r') 66 | # issue 988 67 | f.name 68 | tree = [f.path] 69 | for children in walktree(f): 70 | for child in children: 71 | tree.append(child.path) 72 | f.close() 73 | assert tree == TREE1 74 | f = netCDF4.Dataset(self.file2, 'r') 75 | tree = [f.path] 76 | for children in walktree(f): 77 | for child in children: 78 | tree.append(child.path) 79 | tree.sort() 80 | f.close() 81 | assert tree == TREE2 82 | 83 | if __name__ == '__main__': 84 | unittest.main() 85 | -------------------------------------------------------------------------------- /test/test_compoundatt.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | from netCDF4 import Dataset, CompoundType 6 | import numpy as np 7 | from numpy.testing import assert_array_equal, assert_array_almost_equal 8 | 9 | # test compound attributes. 10 | 11 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 12 | DIM_NAME = 'time' 13 | VAR_NAME = 'wind' 14 | VAR_NAME2 = 'forecast_wind' 15 | GROUP_NAME = 'forecasts' 16 | dtype=np.dtype([('speed', 'f4'), ('direction', 'f4')]) 17 | TYPE_NAME = 'wind_vector_type' 18 | TYPE_NAMEC = 'wind_vectorunits_type' 19 | dtypec=np.dtype([('speed', 'S8'), ('direction', 'S8')]) 20 | missvals = np.empty(1,dtype) 21 | missvals['direction']=1.e20 22 | missvals['speed']=-999. 
23 | windunits = np.empty(1,dtypec) 24 | windunits['speed'] = 'm/s' 25 | windunits['direction'] = 'degrees' 26 | 27 | class VariablesTestCase(unittest.TestCase): 28 | 29 | def setUp(self): 30 | self.file = FILE_NAME 31 | f = Dataset(self.file, 'w') 32 | d = f.createDimension(DIM_NAME,None) 33 | g = f.createGroup(GROUP_NAME) 34 | wind_vector_type = f.createCompoundType(dtype, TYPE_NAME) 35 | wind_vectorunits_type = f.createCompoundType(dtypec, TYPE_NAMEC) 36 | v = f.createVariable(VAR_NAME,wind_vector_type, DIM_NAME) 37 | vv = g.createVariable(VAR_NAME2,wind_vector_type,DIM_NAME) 38 | v.missing_values = missvals 39 | v.units = windunits 40 | vv.missing_values = missvals 41 | vv.units = windunits 42 | f.close() 43 | 44 | def tearDown(self): 45 | # Remove the temporary files 46 | os.remove(self.file) 47 | 48 | def runTest(self): 49 | """testing compound attributes""" 50 | f = Dataset(self.file, 'r') 51 | v = f.variables[VAR_NAME] 52 | g = f.groups[GROUP_NAME] 53 | vv = g.variables[VAR_NAME2] 54 | assert_array_almost_equal(v.missing_values['speed'], missvals['speed']) 55 | assert_array_almost_equal(v.missing_values['direction'],\ 56 | missvals['direction']) 57 | assert_array_almost_equal(vv.missing_values['speed'], missvals['speed']) 58 | assert_array_almost_equal(vv.missing_values['direction'],\ 59 | missvals['direction']) 60 | assert_array_equal(v.units['speed'], windunits['speed'].squeeze()) 61 | assert_array_equal(v.units['direction'],\ 62 | windunits['direction'].squeeze()) 63 | assert_array_equal(vv.units['speed'], windunits['speed'].squeeze()) 64 | assert_array_equal(vv.units['direction'],\ 65 | windunits['direction'].squeeze()) 66 | assert v.units['speed'] == b'm/s' 67 | assert v.units['direction'] == b'degrees' 68 | assert vv.units['speed'] == b'm/s' 69 | assert vv.units['direction'] == b'degrees' 70 | f.close() 71 | 72 | if __name__ == '__main__': 73 | unittest.main() 74 | -------------------------------------------------------------------------------- /examples/bench_compress3.py: -------------------------------------------------------------------------------- 1 | # benchmark reads and writes, with and without compression. 2 | # tests all four supported file formats. 3 | from numpy.random.mtrand import uniform 4 | import netCDF4 5 | from timeit import Timer 6 | import os, sys 7 | 8 | # use real data. 9 | URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc" 10 | nc = netCDF4.Dataset(URL) 11 | 12 | # use real 500 hPa geopotential height data. 
13 | n1dim = 100 14 | n3dim = 73 15 | n4dim = 144 16 | ntrials = 10 17 | sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim)) 18 | sys.stdout.write('(average of %s trials)\n\n' % ntrials) 19 | print(nc) 20 | print(nc.variables['hgt']) 21 | array = nc.variables['hgt'][0:n1dim,5,:,:] 22 | print(array.min(), array.max(), array.shape, array.dtype) 23 | 24 | 25 | def write_netcdf(filename,complevel,lsd): 26 | file = netCDF4.Dataset(filename,'w',format='NETCDF4') 27 | file.createDimension('n1', None) 28 | file.createDimension('n3', n3dim) 29 | file.createDimension('n4', n4dim) 30 | foo = file.createVariable('data',\ 31 | 'f4',('n1','n3','n4'),\ 32 | zlib=True,shuffle=True,complevel=complevel,\ 33 | least_significant_digit=lsd) 34 | foo[:] = array 35 | file.close() 36 | 37 | def read_netcdf(filename): 38 | file = netCDF4.Dataset(filename) 39 | data = file.variables['data'][:] 40 | file.close() 41 | 42 | lsd = None 43 | sys.stdout.write('using least_significant_digit %s\n\n' % lsd) 44 | for complevel in range(0,10,2): 45 | sys.stdout.write('testing compression with complevel %s...\n' % complevel) 46 | # writing. 47 | t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") 48 | sys.stdout.write('writing took %s seconds\n' %\ 49 | repr(sum(t.repeat(ntrials,1))/ntrials)) 50 | # test reading. 51 | t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") 52 | sys.stdout.write('reading took %s seconds\n' % 53 | repr(sum(t.repeat(ntrials,1))/ntrials)) 54 | # print out size of resulting files. 55 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 56 | 57 | complevel = 4 58 | complevel = 4 59 | sys.stdout.write('\nusing complevel %s\n\n' % complevel) 60 | for lsd in range(0,6): 61 | sys.stdout.write('testing compression with least_significant_digit %s..\n'\ 62 | % lsd) 63 | # writing. 64 | t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") 65 | sys.stdout.write('writing took %s seconds\n' %\ 66 | repr(sum(t.repeat(ntrials,1))/ntrials)) 67 | # test reading. 68 | t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") 69 | sys.stdout.write('reading took %s seconds\n' % 70 | repr(sum(t.repeat(ntrials,1))/ntrials)) 71 | # print out size of resulting files. 72 | sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) 73 | -------------------------------------------------------------------------------- /examples/bench_diskless.py: -------------------------------------------------------------------------------- 1 | # benchmark reads and writes, with and without compression. 2 | # tests all four supported file formats. 3 | from typing import TYPE_CHECKING, Any, Literal 4 | from numpy.random.mtrand import uniform 5 | import netCDF4 6 | from timeit import Timer 7 | import os, sys 8 | if TYPE_CHECKING: 9 | from netCDF4 import Format as NCFormat 10 | else: 11 | NCFormat = Any 12 | 13 | # create an n1dim by n2dim by n3dim random array. 
14 | n1dim = 30 15 | n2dim = 15 16 | n3dim = 73 17 | n4dim = 144 18 | ntrials = 10 19 | sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim)) 20 | array = uniform(size=(n1dim,n2dim,n3dim,n4dim)) 21 | 22 | def write_netcdf(filename, zlib=False, least_significant_digit=None, format: NCFormat='NETCDF4',closeit=False): 23 | file = netCDF4.Dataset(filename,'w',format=format,diskless=True,persist=True) 24 | file.createDimension('n1', n1dim) 25 | file.createDimension('n2', n2dim) 26 | file.createDimension('n3', n3dim) 27 | file.createDimension('n4', n4dim) 28 | foo = file.createVariable('data',\ 29 | 'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=None) 30 | foo.testme="hi I am an attribute" 31 | foo.testme1="hi I am an attribute" 32 | foo.testme2="hi I am an attribute" 33 | foo.testme3="hi I am an attribute" 34 | foo.testme4="hi I am an attribute" 35 | foo.testme5="hi I am an attribute" 36 | foo[:] = array 37 | if closeit: file.close() 38 | return file 39 | 40 | def read_netcdf(ncfile): 41 | data = ncfile.variables['data'][:] 42 | 43 | for format in ['NETCDF4','NETCDF3_CLASSIC','NETCDF3_64BIT']: 44 | sys.stdout.write('testing file format %s ...\n' % format) 45 | # writing, no compression. 46 | t = Timer("write_netcdf('test1.nc',closeit=True,format='%s')" % format,"from __main__ import write_netcdf") 47 | sys.stdout.write('writing took %s seconds\n' %\ 48 | repr(sum(t.repeat(ntrials,1))/ntrials)) 49 | # test reading. 50 | ncfile = write_netcdf('test1.nc',format=format) # type: ignore 51 | t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile") 52 | sys.stdout.write('reading took %s seconds\n' % 53 | repr(sum(t.repeat(ntrials,1))/ntrials)) 54 | 55 | # test diskless=True in nc_open 56 | format: Literal["NETCDF3_CLASSIC"] = 'NETCDF3_CLASSIC' # mypy should know this but it needs help... 57 | trials=50 58 | sys.stdout.write('test caching of file in memory on open for %s\n' % format) 59 | sys.stdout.write('testing file format %s ...\n' % format) 60 | write_netcdf('test1.nc',format=format,closeit=True) 61 | ncfile = netCDF4.Dataset('test1.nc',diskless=False) 62 | t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile") 63 | sys.stdout.write('reading (from disk) took %s seconds\n' % 64 | repr(sum(t.repeat(ntrials,1))/ntrials)) 65 | ncfile.close() 66 | ncfile = netCDF4.Dataset('test1.nc',diskless=True) 67 | # setting diskless=True should cache the file in memory, 68 | # resulting in faster reads. 
69 | t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile") 70 | sys.stdout.write('reading (cached in memory) took %s seconds\n' % 71 | repr(sum(t.repeat(ntrials,1))/ntrials)) 72 | ncfile.close() 73 | -------------------------------------------------------------------------------- /.github/workflows/miniconda.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [master] 7 | 8 | jobs: 9 | run-serial: 10 | runs-on: ${{ matrix.os }} 11 | #env: 12 | # NO_NET: 1 13 | strategy: 14 | matrix: 15 | python-version: [ "3.10", "3.11", "3.12", "3.13", "3.14" ] 16 | os: [windows-latest, ubuntu-latest, macos-latest] 17 | platform: [x64, x32] 18 | exclude: 19 | - os: macos-latest 20 | platform: x32 21 | fail-fast: false 22 | defaults: 23 | run: 24 | shell: bash -l {0} 25 | 26 | steps: 27 | - uses: actions/checkout@v6 28 | with: 29 | submodules: true 30 | 31 | - name: Setup Micromamba 32 | uses: mamba-org/setup-micromamba@v2 33 | with: 34 | environment-name: TEST 35 | init-shell: bash 36 | create-args: >- 37 | python=${{ matrix.python-version }} 38 | numpy cython pip setuptools pytest hdf5 libnetcdf cftime zlib certifi typing-extensions 39 | --channel conda-forge 40 | 41 | - name: Install netcdf4-python 42 | run: | 43 | export PATH="${CONDA_PREFIX}/bin:${CONDA_PREFIX}/Library/bin:$PATH" # so setup.py finds nc-config 44 | python -m pip install -v -e . --no-deps --no-build-isolation --force-reinstall 45 | 46 | - name: Tests 47 | run: | 48 | if [ "$RUNNER_OS" == "Windows" ]; then 49 | export HDF5_PLUGIN_PATH="${CONDA_PREFIX}\\Library\\hdf5\\lib\\plugin" 50 | else 51 | export HDF5_PLUGIN_PATH="${CONDA_PREFIX}/hdf5/lib/plugin/" 52 | fi 53 | pytest -s -rxs -v test 54 | 55 | run-mpi: 56 | runs-on: ${{ matrix.os }} 57 | strategy: 58 | matrix: 59 | python-version: [ "3.12" ] 60 | os: [ubuntu-latest] 61 | platform: [x64] 62 | defaults: 63 | run: 64 | shell: bash -l {0} 65 | steps: 66 | - uses: actions/checkout@v6 67 | with: 68 | submodules: true 69 | 70 | - name: Setup Micromamba 71 | uses: mamba-org/setup-micromamba@v2 72 | with: 73 | environment-name: TEST 74 | init-shell: bash 75 | create-args: >- 76 | python=${{ matrix.python-version }} 77 | numpy cython pip pytest openmpi mpi4py hdf5=*=mpi* libnetcdf=*=mpi* cftime zlib certifi typing-extensions 78 | --channel conda-forge 79 | 80 | - name: Install netcdf4-python with mpi 81 | run: | 82 | export PATH="${CONDA_PREFIX}/bin:${CONDA_PREFIX}/Library/bin:$PATH" # so setup.py finds nc-config 83 | nc-config --all 84 | python -m pip install -v -e . --no-build-isolation --no-deps --force-reinstall 85 | 86 | - name: Tests 87 | run: | 88 | cd test && python run_all.py 89 | cd ../examples 90 | export PATH="${CONDA_PREFIX}/bin:${CONDA_PREFIX}/Library/bin:$PATH" 91 | which mpirun 92 | mpirun --version 93 | mpirun -np 4 --oversubscribe python mpi_example.py # for openmpi 94 | #mpirun -np 4 python mpi_example.py 95 | if [ $? -ne 0 ] ; then 96 | echo "hdf5 mpi test failed!" 97 | exit 1 98 | else 99 | echo "hdf5 mpi test passed!" 
100 | fi 101 | -------------------------------------------------------------------------------- /man/nc3tonc4.1: -------------------------------------------------------------------------------- 1 | .\" (C) Copyright 2015, Ross Gammon , 2 | .\" 3 | .TH NC3TONC4 1 "22 Mar 2015" 4 | .\" 5 | .SH NAME 6 | nc3tonc4 \- a program to convert netCDF 3 files to netCDF 4 format files 7 | .SH SYNOPSIS 8 | .B nc3tonc4 9 | .RB [ \-h ] 10 | .RB [ \-o ] 11 | .RB [ \-\-vars=\fIvar1,var2,..\fR ] 12 | .RB [ \-\-zlib=\fI(0|1)\fR ] 13 | .RB [ \-\-complevel=\fI(1\-9)\fR ] 14 | .RB [ \-\-shuffle=\fI(0|1)\fR ] 15 | .RB [ \-\-fletcher32=\fI(0|1)\fR ] 16 | .RB [ \-\-unpackshort=\fI(0|1)\fR ] 17 | .RB [ \-\-quantize=\fIvar1=n1,var2=n2,..\fR ] 18 | .I netcdf3filename 19 | .I netcdf4filename 20 | .br 21 | .SH DESCRIPTION 22 | This manual page documents briefly the 23 | .B nc3tonc4 24 | command. 25 | .PP 26 | \fBnc3tonc4\fP is a program that converts a netCDF 3 file into netCDF 4 format, optionally unpacking variables packed as short integers (with scale_factor and add_offset) to floats, and adding zlib compression (with the HDF5 shuffle filter and fletcher32 checksum). Data may also be quantized (truncated) to a specified precision to improve compression. 27 | .SH OPTIONS 28 | These programs follow the usual GNU command line syntax, with long 29 | options starting with two dashes (`-'). 30 | A summary of options is included below. 31 | .TP 32 | .B \-h 33 | Shows a summary of the available options. 34 | .TP 35 | .B \-o 36 | Overwrite destination file (default is to raise an error if output file already exists). 37 | .TP 38 | .B \-\-vars 39 | A comma separated list of variable names to copy (default is to copy all variables). 40 | .TP 41 | .B \-\-classic=(0|1) 42 | Use NETCDF4_CLASSIC format instead of NETCDF4 (default = 1). 43 | .TP 44 | .B \-\-zlib=(0|1) 45 | Activate (or disable) zlib compression (the default is to activate). 46 | .TP 47 | .B \-\-complevel=(1-9) 48 | Set the zlib compression level (6 is default). 49 | .TP 50 | .B \-\-shuffle=(0|1) 51 | Activate (or disable) the shuffle filter (it is active by default). 52 | .TP 53 | .B \-\-fletcher32=(0|1) 54 | Activate (or disable) the fletcher32 checksum (it is not active by default). 55 | .TP 56 | .B \-\-unpackshort=(0|1) 57 | Unpack short integer variables to float variables using scale_factor and add_offset netCDF variable attributes (it is active by default). 58 | .TP 59 | .B \-\-quantize=(comma separated list of "variable name=integer" pairs) 60 | Truncate the data in the specified variables to a given decimal precision. For example, 'speed=2, height=-2, temp=0' will cause the variable 'speed' to be truncated to a precision of 0.01, 'height' to a precision of 100 and 'temp' to 1. This can significantly improve compression. The default is not to quantize any of the variables. 61 | .TP 62 | .B \-\-quiet=(0|1) 63 | If set to 1, don't print any diagnostic information. 64 | .TP 65 | .B \-\-chunk=(integer) 66 | The number of records along unlimited dimension to write at once. The default is 10. It is ignored if there is no unlimited dimension. If chunk=0, it means write all the data at once. 67 | .TP 68 | .B \-\-istart=(integer) 69 | The number of the record to start at along unlimited dimension. The default is 0. This option is ignored if there is no unlimited dimension. 70 | .TP 71 | .B \-\-istop=(integer) 72 | The number of the record to stop at along unlimited dimension. The default is 1. This option is ignored if there is no unlimited dimension. 
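.\" Illustrative invocation (a sketch using only the options documented
.\" above; the input and output filenames are placeholders).
.SH EXAMPLES
Convert a netCDF 3 file to a compressed NETCDF4_CLASSIC file with the
shuffle filter enabled, overwriting the output file if it already exists:
.PP
.nf
nc3tonc4 \-o \-\-zlib=1 \-\-complevel=6 \-\-shuffle=1 input3.nc output4.nc
.fi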
73 | .SH SEE ALSO 74 | .BR ncinfo (1), 75 | .BR nc4tonc3 (1). 76 | .br 77 | .SH AUTHOR 78 | This manual page was written by Ross Gammon based on the options displayed by nc3tonc4 \-h. 79 | -------------------------------------------------------------------------------- /test/test_diskless.py: -------------------------------------------------------------------------------- 1 | import unittest, os, tempfile 2 | import numpy as np 3 | from numpy.random.mtrand import uniform 4 | from numpy.testing import assert_array_equal, assert_array_almost_equal 5 | import netCDF4 6 | 7 | # rudimentary test of diskless file capability. 8 | 9 | # create an n1dim by n2dim by n3dim random array 10 | n1dim = 10 11 | n2dim = 73 12 | n3dim = 144 13 | ranarr = 100.*uniform(size=(n1dim,n2dim,n3dim)) 14 | ranarr2 = 100.*uniform(size=(n1dim,n2dim,n3dim)) 15 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=True).name 16 | FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 17 | 18 | 19 | @unittest.skipIf( 20 | netCDF4.__netcdf4libversion__ < "4.2.1" 21 | or netCDF4.__has_parallel4_support__ 22 | or netCDF4.__has_pnetcdf_support__, 23 | "no diskless support", 24 | ) 25 | class DisklessTestCase(unittest.TestCase): 26 | def setUp(self): 27 | # in memory file, does not exist on disk (closing it 28 | # makes data disappear from memory) 29 | self.file = FILE_NAME 30 | f = netCDF4.Dataset(self.file,'w',diskless=True, persist=False) 31 | self.f = f 32 | # foo has a single unlimited dimension 33 | f.createDimension('n1', n1dim) 34 | f.createDimension('n2', n2dim) 35 | f.createDimension('n3', n3dim) 36 | foo = f.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3')) 37 | # write some data to it. 38 | foo[0:n1dim-1] = ranarr[:-1,:,:] 39 | foo[n1dim-1] = ranarr[-1,:,:] 40 | # bar has 2 unlimited dimensions 41 | f.createDimension('n4', None) 42 | # write some data to it. 43 | bar = f.createVariable('data2', ranarr.dtype.str[1:], ('n1','n2','n4')) 44 | bar[0:n1dim,:, 0:n3dim] = ranarr2 45 | 46 | # in memory file, that is persisted to disk when close method called. 47 | self.file2 = FILE_NAME2 48 | f2 = netCDF4.Dataset(self.file2,'w',diskless=True, persist=True) 49 | f2.createDimension('n1', n1dim) 50 | f2.createDimension('n2', n2dim) 51 | f2.createDimension('n3', n3dim) 52 | foo = f2.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3')) 53 | # write some data to it. 54 | foo[0:n1dim-1] = ranarr[:-1,:,:] 55 | foo[n1dim-1] = ranarr[-1,:,:] 56 | f2.close() 57 | 58 | def tearDown(self): 59 | # Remove the temporary files 60 | os.remove(self.file2) 61 | self.f.close() 62 | 63 | def runTest(self): 64 | """testing diskless file capability""" 65 | foo = self.f.variables['data1'] 66 | bar = self.f.variables['data2'] 67 | # check shape. 68 | self.assertTrue(foo.shape == (n1dim,n2dim,n3dim)) 69 | self.assertTrue(bar.shape == (n1dim,n2dim,n3dim)) 70 | # check data. 71 | assert_array_almost_equal(foo[:], ranarr) 72 | assert_array_almost_equal(bar[:], ranarr2) 73 | # file does not actually exist on disk 74 | assert os.path.isfile(self.file)==False 75 | # open persisted file. 76 | # first, check that file does actually exist on disk 77 | assert os.path.isfile(self.file2)==True 78 | f = netCDF4.Dataset(self.file2) 79 | foo = f.variables['data1'] 80 | # check shape. 81 | self.assertTrue(foo.shape == (n1dim,n2dim,n3dim)) 82 | # check data. 
83 | assert_array_almost_equal(foo[:], ranarr) 84 | f.close() 85 | 86 | if __name__ == '__main__': 87 | unittest.main() 88 | -------------------------------------------------------------------------------- /.github/workflows/build_master.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test on Linux with netcdf-c github master 2 | on: [push, pull_request] 3 | jobs: 4 | build-linux: 5 | name: Python (${{ matrix.python-version }}) 6 | runs-on: ubuntu-latest 7 | env: 8 | NETCDF_DIR: ${{ github.workspace }}/.. 9 | #CC: mpicc.mpich 10 | CC: mpicc 11 | #NO_NET: 1 12 | strategy: 13 | matrix: 14 | python-version: ["3.14"] 15 | steps: 16 | 17 | - uses: actions/checkout@v6 18 | with: 19 | submodules: true 20 | 21 | - name: Set up Python ${{ matrix.python-version }} 22 | uses: actions/setup-python@v6 23 | with: 24 | python-version: ${{ matrix.python-version }} 25 | 26 | - name: Install Ubuntu Dependencies 27 | run: | 28 | sudo apt-get update 29 | #sudo apt-get install mpich libmpich-dev libhdf5-mpich-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev 30 | sudo apt-get install openmpi-common libopenmpi-dev openmpi-bin libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev 31 | echo "Download and build netCDF github master" 32 | git clone https://github.com/Unidata/netcdf-c 33 | pushd netcdf-c 34 | #export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include" 35 | export CPPFLAGS="-I/usr/include/hdf5/openmpi -I${NETCDF_DIR}/include" 36 | export LDFLAGS="-L${NETCDF_DIR}/lib" 37 | #export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz" 38 | export LIBS="-lhdf5_openmpi_hl -lhdf5_openmpi -lm -lz" 39 | autoreconf -i 40 | ./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --enable-dap --enable-parallel4 41 | make -j 2 42 | sudo make install 43 | popd 44 | 45 | # - name: The job has failed 46 | # if: ${{ failure() }} 47 | # run: | 48 | # cd netcdf-c-${NETCDF_VERSION} 49 | # cat config.log 50 | 51 | - name: Install python dependencies via pip 52 | run: | 53 | python -m pip install --upgrade pip 54 | python -m pip install numpy cython cftime pytest twine wheel check-manifest mpi4py mypy types-setuptools typing-extensions 55 | 56 | - name: Install netcdf4-python 57 | run: | 58 | export PATH=${NETCDF_DIR}/bin:${PATH} 59 | export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c/plugins/plugindir 60 | python -m pip install . --no-build-isolation 61 | 62 | - name: Test 63 | run: | 64 | export PATH=${NETCDF_DIR}/bin:${PATH} 65 | #export HDF5_PLUGIN_PATH=${NETCDF_DIR}/plugins/plugindir 66 | python checkversion.py 67 | # serial 68 | cd test 69 | python run_all.py 70 | # parallel 71 | cd ../examples 72 | #mpirun.mpich -np 4 python mpi_example.py 73 | mpirun -np 4 --oversubscribe python mpi_example.py 74 | if [ $? -ne 0 ] ; then 75 | echo "hdf5 mpi test failed!" 76 | exit 1 77 | else 78 | echo "hdf5 mpi test passed!" 79 | fi 80 | #mpirun.mpich -np 4 python mpi_example_compressed.py 81 | mpirun -np 4 --oversubscribe python mpi_example_compressed.py 82 | if [ $? -ne 0 ] ; then 83 | echo "hdf5 compressed mpi test failed!" 84 | exit 1 85 | else 86 | echo "hdf5 compressed mpi test passed!" 
87 | fi 88 | 89 | - name: Stubtest 90 | run: | 91 | stubtest netCDF4 --allowlist .github/stubtest-allowlist --mypy-config-file=pyproject.toml 92 | mypy test 93 | mypy examples 94 | -------------------------------------------------------------------------------- /test/test_vars.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | import numpy as np 6 | from numpy.random.mtrand import uniform 7 | from numpy.testing import assert_array_equal, assert_array_almost_equal 8 | import netCDF4 9 | 10 | # test variable creation. 11 | 12 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 13 | VAR_DOUBLE_NAME="dummy_var" 14 | VAR_SHORT_NAME='dummy_var_short' 15 | VARNAMES = sorted([VAR_DOUBLE_NAME,VAR_SHORT_NAME]) 16 | GROUP_NAME = "dummy_group" 17 | DIM1_NAME="x" 18 | DIM1_LEN=2 19 | DIM2_NAME="y" 20 | DIM2_LEN=3 21 | DIM3_NAME="z" 22 | DIM3_LEN=25 23 | 24 | randomdata = uniform(size=(DIM1_LEN,DIM2_LEN,DIM3_LEN)) 25 | 26 | class VariablesTestCase(unittest.TestCase): 27 | 28 | def setUp(self): 29 | self.file = FILE_NAME 30 | f = netCDF4.Dataset(self.file, 'w') 31 | f.createDimension(DIM1_NAME, DIM1_LEN) 32 | f.createDimension(DIM2_NAME, DIM2_LEN) 33 | f.createDimension(DIM3_NAME, DIM3_LEN) 34 | v1 = f.createVariable(VAR_DOUBLE_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME)) 35 | v2 = f.createVariable(VAR_SHORT_NAME, 'i2',(DIM2_NAME,DIM3_NAME)) 36 | v1.long_name = 'dummy data root' 37 | g = f.createGroup(GROUP_NAME) 38 | g.createDimension(DIM1_NAME, DIM1_LEN) 39 | g.createDimension(DIM2_NAME, DIM2_LEN) 40 | g.createDimension(DIM3_NAME, DIM3_LEN) 41 | v1g = g.createVariable(VAR_DOUBLE_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME)) 42 | v2g = g.createVariable(VAR_SHORT_NAME, 'i2',(DIM2_NAME,DIM3_NAME)) 43 | v1g.long_name = 'dummy data subgroup' 44 | v1[:] = randomdata 45 | v1g[:] = randomdata 46 | f.close() 47 | 48 | def tearDown(self): 49 | # Remove the temporary files 50 | os.remove(self.file) 51 | 52 | def runTest(self): 53 | """testing primitive variables""" 54 | f = netCDF4.Dataset(self.file, 'r') 55 | # check variables in root group. 56 | varnames = sorted(f.variables.keys()) 57 | v1 = f.variables[VAR_DOUBLE_NAME] 58 | v2 = f.variables[VAR_SHORT_NAME] 59 | assert varnames == VARNAMES 60 | assert v1.dtype.str[1:] == 'f8' 61 | assert v2.dtype.str[1:] == 'i2' 62 | assert v1.long_name == 'dummy data root' 63 | assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME) 64 | assert v2.dimensions == (DIM2_NAME,DIM3_NAME) 65 | assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN) 66 | assert v2.shape == (DIM2_LEN,DIM3_LEN) 67 | assert v1.size == DIM1_LEN * DIM2_LEN * DIM3_LEN 68 | assert len(v1) == DIM1_LEN 69 | 70 | #assert np.allclose(v1[:],randomdata) 71 | assert_array_almost_equal(v1[:],randomdata) 72 | # check variables in sub group. 
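# A short sketch (hypothetical file/group/variable names, not part of this
# test) of how nested groups are reached through the same dict-style API:
#   with netCDF4.Dataset("example.nc") as ds:
#       grp = ds.groups["some_group"]          # a Group behaves like a Dataset
#       var = grp.variables["some_variable"]   # maps names to Variable objects
#       print(var.dimensions, var.shape)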
73 | g = f.groups[GROUP_NAME] 74 | varnames = sorted(g.variables.keys()) 75 | v1 = g.variables[VAR_DOUBLE_NAME] 76 | # test iterating over variable (should stop when 77 | # it gets to the end and raises IndexError, issue 121) 78 | for v in v1: 79 | pass 80 | v2 = g.variables[VAR_SHORT_NAME] 81 | assert varnames == VARNAMES 82 | assert v1.dtype.str[1:] == 'f8' 83 | assert v2.dtype.str[1:] == 'i2' 84 | assert v1.long_name == 'dummy data subgroup' 85 | assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME) 86 | assert v2.dimensions == (DIM2_NAME,DIM3_NAME) 87 | assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN) 88 | assert v2.shape == (DIM2_LEN,DIM3_LEN) 89 | #assert np.allclose(v1[:],randomdata) 90 | assert_array_almost_equal(v1[:],randomdata) 91 | f.close() 92 | 93 | if __name__ == '__main__': 94 | unittest.main() 95 | -------------------------------------------------------------------------------- /test/test_complex.py: -------------------------------------------------------------------------------- 1 | import netCDF4 2 | import numpy as np 3 | import pathlib 4 | import tempfile 5 | import unittest 6 | 7 | complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j], dtype="c16") 8 | np_dt = np.dtype([("r", np.float64), ("i", np.float64)]) 9 | complex_struct_array = np.array( 10 | [(r, i) for r, i in zip(complex_array.real, complex_array.imag)], 11 | dtype=np_dt, 12 | ) 13 | 14 | 15 | class ComplexNumbersTestCase(unittest.TestCase): 16 | def setUp(self): 17 | self.tmp_path = pathlib.Path(tempfile.mkdtemp()) 18 | 19 | def test_read_dim(self): 20 | filename = self.tmp_path / "test_read_dim.nc" 21 | 22 | with netCDF4.Dataset(filename, "w") as f: 23 | f.createDimension("x", size=len(complex_array)) 24 | f.createDimension("ri", size=2) 25 | c_ri = f.createVariable("data_dim", np.float64, ("x", "ri")) 26 | as_dim_array = np.vstack((complex_array.real, complex_array.imag)).T 27 | c_ri[:] = as_dim_array 28 | 29 | with netCDF4.Dataset(filename, "r", auto_complex=True) as f: 30 | assert "data_dim" in f.variables 31 | data_dim = f["data_dim"] 32 | assert data_dim.shape == complex_array.shape 33 | data = data_dim[:] 34 | 35 | assert np.array_equal(data, complex_array) 36 | 37 | def test_read_struct(self): 38 | filename = self.tmp_path / "test_read_struct.nc" 39 | 40 | with netCDF4.Dataset(filename, "w") as f: 41 | f.createDimension("x", size=len(complex_array)) 42 | nc_dt = f.createCompoundType(np_dt, "nc_complex") 43 | c_struct = f.createVariable("data_struct", nc_dt, ("x",)) 44 | c_struct[:] = complex_struct_array 45 | 46 | with netCDF4.Dataset(filename, "r", auto_complex=True) as f: 47 | assert "data_struct" in f.variables 48 | data = f["data_struct"][:] 49 | 50 | assert np.array_equal(data, complex_array) 51 | 52 | def test_write(self): 53 | filename = self.tmp_path / "test_write.nc" 54 | with netCDF4.Dataset(filename, "w", auto_complex=True) as f: 55 | f.createDimension("x", size=len(complex_array)) 56 | complex_var = f.createVariable("complex_data", "c16", ("x",)) 57 | complex_var[:] = complex_array 58 | 59 | with netCDF4.Dataset(filename, "r") as f: 60 | assert "complex_data" in f.variables 61 | assert np.array_equal(f["complex_data"], complex_struct_array) 62 | 63 | def test_write_with_np_complex128(self): 64 | filename = self.tmp_path / "test_write_with_np_complex128.nc" 65 | with netCDF4.Dataset(filename, "w", auto_complex=True) as f: 66 | f.createDimension("x", size=len(complex_array)) 67 | complex_var = f.createVariable("complex_data", np.complex128, ("x",)) 68 | complex_var[:] = 
complex_array 69 | 70 | with netCDF4.Dataset(filename, "r") as f: 71 | assert "complex_data" in f.variables 72 | assert np.array_equal(f["complex_data"], complex_struct_array) 73 | 74 | def test_write_netcdf3(self): 75 | filename = self.tmp_path / "test_write_netcdf3.nc" 76 | with netCDF4.Dataset( 77 | filename, "w", format="NETCDF3_CLASSIC", auto_complex=True 78 | ) as f: 79 | f.createDimension("x", size=len(complex_array)) 80 | complex_var = f.createVariable("complex_data", "c16", ("x",)) 81 | complex_var[:] = complex_array 82 | 83 | with netCDF4.Dataset(filename, "r", auto_complex=True) as f: 84 | assert "complex_data" in f.variables 85 | assert np.array_equal(f["complex_data"][:], complex_array) 86 | 87 | 88 | if __name__ == "__main__": 89 | unittest.main() 90 | -------------------------------------------------------------------------------- /test/test_enum.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import unittest 4 | 5 | import netCDF4 6 | import numpy as np 7 | from netCDF4 import Dataset, EnumType 8 | from numpy.testing import assert_array_equal 9 | 10 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 11 | ENUM_NAME = 'cloud_t' 12 | ENUM_BASETYPE = np.int8 13 | VAR_NAME = 'primary_cloud' 14 | ENUM_DICT = {'Altocumulus': 7, 'Missing': 127, 'Stratus': 2, 'Clear': 0, 15 | 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1, 16 | 'Stratocumulus': 3} 17 | datain = np.array([ENUM_DICT['Clear'],ENUM_DICT['Stratus'],ENUM_DICT['Cumulus'],\ 18 | ENUM_DICT['Missing'],ENUM_DICT['Cumulonimbus']],dtype=ENUM_BASETYPE) 19 | datain_masked = np.ma.masked_values(datain,ENUM_DICT['Missing']) 20 | 21 | 22 | class EnumTestCase(unittest.TestCase): 23 | 24 | def setUp(self): 25 | self.file = FILE_NAME 26 | f = Dataset(self.file,'w') 27 | cloud_type = f.createEnumType(ENUM_BASETYPE,ENUM_NAME,ENUM_DICT) 28 | # make sure KeyError raised if non-integer basetype used. 29 | try: 30 | cloud_typ2 = f.createEnumType(np.float32,ENUM_NAME,ENUM_DICT) # type: ignore[arg-type] # mypy correctly doesn't like float32 31 | except KeyError: 32 | pass 33 | f.createDimension('time',None) 34 | cloud_var =\ 35 | f.createVariable(VAR_NAME,cloud_type,'time',\ 36 | fill_value=ENUM_DICT['Missing']) 37 | cloud_var[:] = datain_masked 38 | # make sure ValueError raised if illegal value assigned to Enum var. 
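# A minimal sketch (illustrative names, "ds" is an open writable Dataset; not
# part of this test) of the enum round trip exercised here; assigning a value
# missing from the enum dict is what should raise the ValueError below:
#   cloud_t = ds.createEnumType(np.int8, "cloud_t", {"Clear": 0, "Stratus": 2})
#   cv = ds.createVariable("cloud", cloud_t, "time")
#   cv[0] = 2    # ok, value is in the enum dict
#   cv[1] = 99   # not in the dict -> ValueError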
39 | try: 40 | cloud_var[cloud_var.shape[0]] = 99 41 | except ValueError: 42 | pass 43 | f.close() 44 | 45 | def tearDown(self): 46 | # Remove the temporary files 47 | os.remove(self.file) 48 | 49 | def runTest(self): 50 | """testing enum data type""" 51 | f = Dataset(self.file, 'r') 52 | v = f.variables[VAR_NAME] 53 | assert isinstance(v.datatype, EnumType) 54 | assert v.datatype.enum_dict == ENUM_DICT 55 | assert list(f.enumtypes.keys()) == [ENUM_NAME] 56 | assert f.enumtypes[ENUM_NAME].name == ENUM_NAME # issue 775 57 | assert f.enumtypes[ENUM_NAME].dtype == ENUM_BASETYPE 58 | assert v._FillValue == ENUM_DICT['Missing'] 59 | v.set_auto_mask(False) 60 | data = v[:] 61 | assert_array_equal(data, datain) 62 | v.set_auto_mask(True) # check to see if auto masking works 63 | data = v[:] 64 | assert_array_equal(data, datain_masked) 65 | assert_array_equal(data.mask, datain_masked.mask) 66 | f.close() 67 | 68 | class EnumDictTestCase(unittest.TestCase): 69 | 70 | # issue 1128 71 | def setUp(self): 72 | DT = np.int16; BITS = 8 73 | self.STORED_VAL = DT(2**BITS) 74 | self.VAL_MAP = {f'bits_{n}': DT(2**n) for n in range(1,BITS+1)} 75 | self.VAL_MAP['invalid'] = DT(0) 76 | self.file = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 77 | with netCDF4.Dataset(self.file, 'w') as nc: 78 | # The enum is created with dtype=int16, so it will allow BITS values up to 15 79 | et = nc.createEnumType(DT, 'etype', self.VAL_MAP) 80 | ev = nc.createVariable('evar', et) 81 | # Succeeds because the created EnumType does keep the correct dict 82 | ev[...] = self.STORED_VAL 83 | 84 | def tearDown(self): 85 | os.remove(self.file) 86 | 87 | def runTest(self): 88 | with netCDF4.Dataset(self.file, 'r') as nc: 89 | read_var = nc['evar'] 90 | read_et = nc.enumtypes["etype"] 91 | assert read_var[...] == self.STORED_VAL 92 | assert read_et.enum_dict == self.VAL_MAP 93 | 94 | if __name__ == '__main__': 95 | unittest.main() 96 | -------------------------------------------------------------------------------- /test/test_masked6.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import tempfile 4 | 5 | import numpy as np 6 | from numpy import ma 7 | from numpy.testing import assert_array_almost_equal 8 | from netCDF4 import Dataset 9 | 10 | # Test automatic conversion of masked arrays (set_always_mask()) 11 | 12 | class SetAlwaysMaskTestBase(unittest.TestCase): 13 | 14 | """Base object for tests checking the functionality of set_always_mask()""" 15 | 16 | def setUp(self): 17 | 18 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 19 | 20 | self.v = np.array([4, 3, 2, 1], dtype="i2") 21 | self.w = np.ma.array([-1, -2, -3, -4], mask=[False, True, False, False], dtype="i2") 22 | 23 | f = Dataset(self.testfile, 'w') 24 | _ = f.createDimension('x', None) 25 | v = f.createVariable('v', "i2", 'x') 26 | w = f.createVariable('w', "i2", 'x') 27 | 28 | v[...] = self.v 29 | w[...] = self.w 30 | 31 | f.close() 32 | 33 | def tearDown(self): 34 | 35 | os.remove(self.testfile) 36 | 37 | 38 | class SetAlwaysMaskTrue(SetAlwaysMaskTestBase): 39 | 40 | def test_always_mask(self): 41 | 42 | """Testing auto-conversion of masked arrays with no missing values to regular arrays.""" 43 | f = Dataset(self.testfile) 44 | 45 | f.variables["v"].set_always_mask(True) # The default anyway... 
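# In short (aside, not part of this test): with always_mask=True reads always
# come back as numpy.ma.masked_array, even when nothing is masked; with the
# opposite setting an unmasked read is a plain ndarray, e.g. for some
# hypothetical variable var:
#   var.set_always_mask(False)
#   data = var[:]   # plain np.ndarray when no values are masked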
46 | 47 | v = f.variables['v'][:] 48 | 49 | self.assertTrue(isinstance(v, np.ndarray)) 50 | self.assertTrue(isinstance(v, ma.masked_array)) 51 | assert_array_almost_equal(v, self.v) 52 | 53 | w = f.variables['w'][:] 54 | 55 | self.assertTrue(isinstance(w, np.ndarray)) 56 | self.assertTrue(isinstance(w, ma.masked_array)) 57 | assert_array_almost_equal(w, self.w) 58 | 59 | f.close() 60 | 61 | class SetAlwyasMaskFalse(SetAlwaysMaskTestBase): 62 | 63 | def test_always_mask(self): 64 | 65 | """Testing auto-conversion of masked arrays with no missing values to regular arrays.""" 66 | f = Dataset(self.testfile) 67 | 68 | f.variables["v"].set_always_mask(False) 69 | v = f.variables['v'][:] 70 | 71 | self.assertTrue(isinstance(v, np.ndarray)) 72 | self.assertFalse(isinstance(v, ma.masked_array)) 73 | assert_array_almost_equal(v, self.v) 74 | 75 | w = f.variables['w'][:] 76 | 77 | self.assertTrue(isinstance(w, np.ndarray)) 78 | self.assertTrue(isinstance(w, ma.masked_array)) 79 | assert_array_almost_equal(w, self.w) 80 | 81 | f.close() 82 | 83 | class GlobalSetAlwaysMaskTest(unittest.TestCase): 84 | 85 | def setUp(self): 86 | 87 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 88 | 89 | f = Dataset(self.testfile, 'w') 90 | 91 | grp1 = f.createGroup('Group1') 92 | grp2 = f.createGroup('Group2') 93 | f.createGroup('Group3') # empty group 94 | 95 | f.createVariable('var0', "i2", ()) 96 | grp1.createVariable('var1', 'f8', ()) 97 | grp2.createVariable('var2', 'f4', ()) 98 | 99 | f.close() 100 | 101 | def tearDown(self): 102 | 103 | os.remove(self.testfile) 104 | 105 | def runTest(self): 106 | 107 | # Note: The default behaviour is to always return masked 108 | # arrays, which is already tested elsewhere. 109 | 110 | f = Dataset(self.testfile, "r") 111 | 112 | # Without regular numpy arrays 113 | 114 | f.set_always_mask(True) 115 | 116 | v0 = f.variables['var0'] 117 | v1 = f.groups['Group1'].variables['var1'] 118 | v2 = f.groups['Group2'].variables['var2'] 119 | 120 | self.assertTrue(v0.always_mask) 121 | self.assertTrue(v1.always_mask) 122 | self.assertTrue(v2.always_mask) 123 | 124 | # With regular numpy arrays 125 | 126 | f.set_always_mask(False) 127 | 128 | self.assertFalse(v0.always_mask) 129 | self.assertFalse(v1.always_mask) 130 | self.assertFalse(v2.always_mask) 131 | 132 | f.close() 133 | 134 | 135 | if __name__ == '__main__': 136 | unittest.main() 137 | -------------------------------------------------------------------------------- /test/test_compression_blosc.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Any, Literal 2 | from numpy.random.mtrand import uniform 3 | from netCDF4 import Dataset 4 | from numpy.testing import assert_almost_equal 5 | import os, tempfile, unittest, sys, pytest 6 | from filter_availability import no_plugins, has_blosc_filter 7 | if TYPE_CHECKING: 8 | from netCDF4 import CompressionLevel 9 | else: 10 | CompressionLevel = Any 11 | 12 | 13 | ndim = 100000 14 | iblosc_shuffle=2 15 | iblosc_complevel=4 16 | filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 17 | datarr = uniform(size=(ndim,)) 18 | 19 | def write_netcdf(filename, dtype='f8', blosc_shuffle: Literal[0, 1, 2] = 1, complevel: CompressionLevel = 6): 20 | nc = Dataset(filename,'w') 21 | nc.createDimension('n', ndim) 22 | foo = nc.createVariable('data',\ 23 | dtype,('n'),compression=None) 24 | foo_lz = nc.createVariable('data_lz',\ 25 | 
dtype,('n'),compression='blosc_lz',blosc_shuffle=blosc_shuffle,complevel=complevel) 26 | foo_lz4 = nc.createVariable('data_lz4',\ 27 | dtype,('n'),compression='blosc_lz4',blosc_shuffle=blosc_shuffle,complevel=complevel) 28 | foo_lz4hc = nc.createVariable('data_lz4hc',\ 29 | dtype,('n'),compression='blosc_lz4hc',blosc_shuffle=blosc_shuffle,complevel=complevel) 30 | foo_zlib = nc.createVariable('data_zlib',\ 31 | dtype,('n'),compression='blosc_zlib',blosc_shuffle=blosc_shuffle,complevel=complevel) 32 | foo_zstd = nc.createVariable('data_zstd',\ 33 | dtype,('n'),compression='blosc_zstd',blosc_shuffle=blosc_shuffle,complevel=complevel) 34 | foo_lz[:] = datarr 35 | foo_lz4[:] = datarr 36 | foo_lz4hc[:] = datarr 37 | foo_zlib[:] = datarr 38 | foo_zstd[:] = datarr 39 | nc.close() 40 | 41 | 42 | @unittest.skipIf(no_plugins or not has_blosc_filter, "blosc filter not available") 43 | # allow failures for this test for now (it fails in Windows wheel workflow) 44 | @pytest.mark.xfail 45 | class CompressionTestCase(unittest.TestCase): 46 | def setUp(self): 47 | self.filename = filename 48 | write_netcdf(self.filename,complevel=iblosc_complevel,blosc_shuffle=iblosc_shuffle) # type: ignore 49 | 50 | def tearDown(self): 51 | # Remove the temporary files 52 | os.remove(self.filename) 53 | 54 | def runTest(self): 55 | f = Dataset(self.filename) 56 | assert_almost_equal(datarr,f.variables['data'][:]) 57 | assert f.variables['data'].filters() ==\ 58 | {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} 59 | assert_almost_equal(datarr,f.variables['data_lz'][:]) 60 | dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': 61 | {'compressor': 'blosc_lz', 'shuffle': iblosc_shuffle}, 62 | 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} 63 | assert f.variables['data_lz'].filters() == dtest 64 | assert_almost_equal(datarr,f.variables['data_lz4'][:]) 65 | dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': 66 | {'compressor': 'blosc_lz4', 'shuffle': iblosc_shuffle}, 67 | 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} 68 | assert f.variables['data_lz4'].filters() == dtest 69 | assert_almost_equal(datarr,f.variables['data_lz4hc'][:]) 70 | dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': 71 | {'compressor': 'blosc_lz4hc', 'shuffle': iblosc_shuffle}, 72 | 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} 73 | assert f.variables['data_lz4hc'].filters() == dtest 74 | assert_almost_equal(datarr,f.variables['data_zlib'][:]) 75 | dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': 76 | {'compressor': 'blosc_zlib', 'shuffle': iblosc_shuffle}, 77 | 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} 78 | assert f.variables['data_zlib'].filters() == dtest 79 | assert_almost_equal(datarr,f.variables['data_zstd'][:]) 80 | dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': 81 | {'compressor': 'blosc_zstd', 'shuffle': iblosc_shuffle}, 82 | 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} 83 | assert f.variables['data_zstd'].filters() == dtest 84 | f.close() 85 | 86 | 87 | if __name__ == '__main__': 88 | unittest.main() 89 | -------------------------------------------------------------------------------- /test/test_masked2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import 
unittest 3 | import os 4 | import tempfile 5 | import numpy as np 6 | from numpy import ma, seterr 7 | from numpy.testing import assert_array_equal, assert_array_almost_equal 8 | from netCDF4 import Dataset, default_fillvals 9 | 10 | seterr(over='ignore') # don't print warning for overflow errors 11 | 12 | # test automatic conversion of masked arrays, and 13 | # packing/unpacking of short ints. 14 | 15 | FILE_NAME1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 16 | FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 17 | FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 18 | datacheck1 =\ 19 | ma.array([0,5000.0,4000.0,0],dtype=np.float64,mask=[True,False,False,True]) 20 | datacheck2 =\ 21 | ma.array([3000.0,5000.0,4000.0,0],dtype=np.float64,mask=[False,False,False,True]) 22 | datacheck3 =\ 23 | ma.array([3000.0,5000.0,0,2000.0],dtype=np.float64,mask=[False,False,True,False]) 24 | mask = [False,True,False,False] 25 | datacheck4 = ma.array([1.5625,0,3.75,4.125],mask=mask,dtype=np.float32) 26 | fillval = default_fillvals[datacheck4.dtype.str[1:]] 27 | datacheck5 = np.array([1.5625,fillval,3.75,4.125],dtype=np.float32) 28 | 29 | class PrimitiveTypesTestCase(unittest.TestCase): 30 | 31 | def setUp(self): 32 | 33 | self.files = [FILE_NAME1] 34 | f = Dataset(FILE_NAME1,'w') 35 | x = f.createDimension('x',None) 36 | v = f.createVariable('v',np.int16,'x') 37 | v.scale_factor = np.array(1,np.float32) 38 | v.add_offset = np.array(32066,np.float32) 39 | v.missing_value = np.array(-9999,v.dtype) 40 | #v[0] not set, will be equal to _FillValue 41 | v[1]=5000 42 | v[2]=4000 43 | v[3]=v.missing_value 44 | f.close() 45 | 46 | self.files.append(FILE_NAME2) 47 | f = Dataset(FILE_NAME1,'r') 48 | # create a new file, copy data, but change missing value and 49 | # scale factor offset. 50 | f2 = Dataset(FILE_NAME2,'w') 51 | a = f2.createDimension('a',None) 52 | b = f2.createVariable('b',np.int16,'a') 53 | b.scale_factor = np.array(10.,np.float32) 54 | b.add_offset = np.array(0,np.float32) 55 | b.missing_value = np.array(9999,v.dtype) 56 | b[:] = f.variables['v'][:] 57 | f.close() 58 | f2.close() 59 | 60 | self.files.append(FILE_NAME3) 61 | f = Dataset(FILE_NAME3,'w') 62 | x = f.createDimension('x',None) 63 | # create variable with lossy compression 64 | v = f.createVariable('v',np.float32,'x',zlib=True,least_significant_digit=1) 65 | # assign masked array to that variable with one missing value. 66 | data =\ 67 | ma.array([1.5678,99.99,3.75145,4.127654],mask=np.array([False,True,False,False],np.bool_)) 68 | data.mask[1]=True 69 | v[:] = data 70 | f.close() 71 | 72 | def tearDown(self): 73 | # Remove the temporary files 74 | for f in self.files: 75 | os.remove(f) 76 | 77 | def runTest(self): 78 | """testing auto-conversion of masked arrays and packed integers""" 79 | 80 | f = Dataset(self.files[0]) 81 | data = f.variables['v'][:] 82 | assert_array_almost_equal(data,datacheck1) 83 | f.close() 84 | 85 | f = Dataset(self.files[1]) 86 | data = f.variables['b'][:] 87 | assert_array_almost_equal(data,datacheck1) 88 | f.close() 89 | 90 | f = Dataset(self.files[0],'a') 91 | # change first element from _FillValue to actual data. 92 | v = f.variables['v'] 93 | v[0]=3000 94 | f.close() 95 | f = Dataset(self.files[0],'r') 96 | # read data back in, check. 97 | data = f.variables['v'][:] 98 | assert_array_almost_equal(data,datacheck2) 99 | f.close() 100 | 101 | f = Dataset(self.files[0],'a') 102 | # change 3rd element to missing, 4 element to valid data. 
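# Packing convention being exercised (aside, illustrative names, not part of
# this test): on read the stored short ints are unpacked as
# stored * scale_factor + add_offset, and anything equal to missing_value is
# masked; on write the inverse (data - add_offset) / scale_factor is stored:
#   p = ds.createVariable("p", np.int16, "x")
#   p.scale_factor = np.float32(0.01)
#   p.add_offset = np.float32(20.0)
#   p.missing_value = np.int16(-9999)
#   p[0] = 21.5   # stored as round((21.5 - 20.0) / 0.01) = 150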
103 | v = f.variables['v'] 104 | data = v[:] 105 | v[2]=-9999 106 | v[3]=2000 107 | f.close() 108 | f = Dataset(self.files[0],'r') 109 | # read data back in, check. 110 | data = f.variables['v'][:] 111 | assert_array_almost_equal(data,datacheck3) 112 | f.close() 113 | 114 | # check that masked arrays are handled correctly when lossy compression 115 | # is used. 116 | f = Dataset(self.files[2],'r') 117 | data = f.variables['v'][:] 118 | assert_array_almost_equal(data,datacheck4) 119 | assert_array_almost_equal(data.filled(),datacheck5) 120 | f.close() 121 | 122 | if __name__ == '__main__': 123 | unittest.main() 124 | -------------------------------------------------------------------------------- /test/test_masked4.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import tempfile 4 | import pathlib 5 | 6 | import numpy as np 7 | from numpy import ma 8 | from numpy.testing import assert_array_almost_equal 9 | from netCDF4 import Dataset, default_fillvals 10 | 11 | # Test use of valid_min/valid_max/valid_range in generation of masked arrays 12 | 13 | class SetValidMinMax(unittest.TestCase): 14 | 15 | def setUp(self): 16 | 17 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 18 | 19 | self.valid_min = -32765 20 | self.valid_max = 32765 21 | self.valid_range = [self.valid_min,self.valid_max] 22 | self.v = np.array([self.valid_min-1, 5, 4, self.valid_max+1], dtype = "i2") 23 | self.v_ma = ma.array([self.valid_min-1, 5, 4, self.valid_max+1], dtype = "i2", mask = [True, False, False, True]) 24 | 25 | self.scale_factor = 10. 26 | self.add_offset = 5. 27 | 28 | self.v_scaled = self.v * self.scale_factor + self.add_offset 29 | self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset 30 | 31 | f = Dataset(self.testfile, 'w') 32 | _ = f.createDimension('x', None) 33 | v = f.createVariable('v', "i2", 'x') 34 | v2 = f.createVariable('v2', "i2", 'x') 35 | v3 = f.createVariable('v3', "i2", 'x', fill_value=self.valid_min) 36 | 37 | v.missing_value = np.array(32767, v.dtype) 38 | v.valid_min = np.array(self.valid_min, v.dtype) 39 | v.valid_max = np.array(self.valid_max, v.dtype) 40 | v.valid_range = np.array(0, v.dtype) # issue 1013, this is wrong but should not raise an exception 41 | 42 | v[0] = self.valid_min-1 43 | v[1] = self.v[1] 44 | v[2] = self.v[2] 45 | v[3] = self.valid_max+1 46 | 47 | v2.missing_value = np.array(32767, v.dtype) 48 | v2.valid_range = np.array(self.valid_range, v.dtype) 49 | 50 | v2[0] = self.valid_range[0]-1 51 | v2[1] = self.v[1] 52 | v2[2] = self.v[2] 53 | v2[3] = self.valid_range[1]+1 54 | 55 | v3.missing_value = np.array(32767, v.dtype) 56 | v3.valid_max = np.array(self.valid_max, v.dtype) 57 | 58 | # _FillValue should act as valid_min 59 | v3[0] = v3._FillValue-1 60 | v3[1] = self.v[1] 61 | v3[2] = self.v[2] 62 | v3[3] = self.valid_max+1 63 | 64 | f.close() 65 | 66 | 67 | def tearDown(self): 68 | 69 | os.remove(self.testfile) 70 | 71 | 72 | def test_scaled(self): 73 | 74 | """Testing auto-conversion of masked arrays""" 75 | 76 | # Update test data file 77 | 78 | f = Dataset(self.testfile, "a") 79 | f.variables["v"].scale_factor = self.scale_factor 80 | f.variables["v"].add_offset = self.add_offset 81 | f.variables["v2"].scale_factor = self.scale_factor 82 | f.variables["v2"].add_offset = self.add_offset 83 | f.close() 84 | 85 | f = Dataset(self.testfile, "r") 86 | v = f.variables["v"][:] 87 | v2 = f.variables["v2"][:] 88 | v3 = f.variables["v3"][:] 89 | 
self.assertEqual(v.dtype, "f8") 90 | self.assertTrue(isinstance(v, np.ndarray)) 91 | self.assertTrue(isinstance(v, ma.masked_array)) 92 | assert_array_almost_equal(v, self.v_scaled) 93 | self.assertEqual(v2.dtype, "f8") 94 | self.assertTrue(isinstance(v2, np.ndarray)) 95 | self.assertTrue(isinstance(v2, ma.masked_array)) 96 | assert_array_almost_equal(v2, self.v_scaled) 97 | self.assertTrue(np.all(self.v_ma.mask == v.mask)) 98 | self.assertTrue(np.all(self.v_ma.mask == v2.mask)) 99 | # treating _FillValue as valid_min/valid_max was 100 | # too surprising, revert to old behaviour (issue #761) 101 | #self.assertTrue(np.all(self.v_ma.mask == v3.mask)) 102 | # check that underlying data is same as in netcdf file 103 | v = f.variables['v'] 104 | v.set_auto_scale(False) 105 | v = v[:] 106 | self.assertTrue(np.all(self.v == v.data)) 107 | f.close() 108 | 109 | # issue 672 110 | with Dataset(pathlib.Path(__file__).parent / "issue672.nc") as f: 111 | field = 'azi_angle_trip' 112 | v = f.variables[field] 113 | data1 = v[:] 114 | v.set_auto_scale(False) 115 | data2 = v[:] 116 | v.set_auto_maskandscale(False) 117 | data3 = v[:] 118 | assert data1[(data3 < v.valid_min)].mask.sum() == 12 119 | assert data2[(data3 < v.valid_min)].mask.sum() ==\ 120 | data1[(data3 < v.valid_min)].mask.sum() 121 | 122 | 123 | if __name__ == '__main__': 124 | unittest.main() 125 | -------------------------------------------------------------------------------- /test/test_types.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from typing import TYPE_CHECKING, Any 3 | import unittest 4 | import os 5 | import tempfile 6 | import numpy as np 7 | from numpy.testing import assert_array_equal, assert_array_almost_equal 8 | from numpy.random.mtrand import uniform 9 | import netCDF4 10 | if TYPE_CHECKING: 11 | from netCDF4 import CompressionLevel 12 | else: 13 | CompressionLevel = Any 14 | 15 | # test primitive data types. 16 | 17 | # create an n1dim by n2dim random ranarr. 
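# Aside (hypothetical names, not part of this test): the entries of
# `datatypes` below are numpy dtype strings, and any of them can be passed
# straight to createVariable, e.g.
#   v = ds.createVariable("example_f4", "f4", ("n1", "n2"), fill_value=1.0)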
18 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 19 | n1dim = 5 20 | n2dim = 10 21 | ranarr = 100.*uniform(size=(n1dim,n2dim)) 22 | zlib=False; complevel=0; shuffle=False; least_significant_digit=None 23 | datatypes = ['f8','f4','i1','i2','i4','i8','u1','u2','u4','u8','S1'] 24 | FillValue = 1.0 25 | issue273_data = np.ma.array(['z']*10,dtype='S1',\ 26 | mask=[False,False,False,False,False,True,False,False,False,False]) 27 | 28 | class PrimitiveTypesTestCase(unittest.TestCase): 29 | 30 | def setUp(self): 31 | self.file = FILE_NAME 32 | f = netCDF4.Dataset(self.file,'w') 33 | f.createDimension('n1', None) 34 | f.createDimension('n2', n2dim) 35 | for typ in datatypes: 36 | foo = f.createVariable( 37 | f"data_{typ}", 38 | typ, 39 | ('n1','n2',), 40 | zlib=zlib, 41 | complevel=complevel, # type: ignore # type checkers bad at narrowing 42 | shuffle=shuffle, 43 | least_significant_digit=least_significant_digit, 44 | fill_value=FillValue, 45 | ) 46 | #foo._FillValue = FillValue 47 | # test writing of _FillValue attribute for diff types 48 | # (should be cast to type of variable silently) 49 | foo[1:n1dim] = ranarr[1:n1dim] 50 | v = f.createVariable('issue271', np.dtype('S1'), [], fill_value=b'Z') 51 | v2 = f.createVariable('issue273', np.dtype('S1'), 'n2',\ 52 | fill_value='\x00') 53 | v2[:] = issue273_data 54 | v3 = f.createVariable('issue707',np.int8,'n2') 55 | v3.setncattr('missing_value',255) 56 | v3[:]=-1 57 | f.close() 58 | 59 | def tearDown(self): 60 | # Remove the temporary files 61 | os.remove(self.file) 62 | 63 | def runTest(self): 64 | """testing primitive data type """ 65 | f = netCDF4.Dataset(self.file) 66 | for typ in datatypes: 67 | data = f.variables['data_'+typ] 68 | data.set_auto_maskandscale(False) 69 | datarr: np.ndarray = data[1:n1dim] 70 | # fill missing data with _FillValue 71 | # ('S1' array will have some missing values) 72 | if hasattr(datarr, 'mask'): 73 | assert isinstance(datarr, np.ma.masked_array) 74 | datarr = datarr.filled() 75 | datfilled = data[0] 76 | # check to see that data type is correct 77 | if typ == 'S1': 78 | self.assertTrue(data.dtype.str[1:] in ['S1','U1']) 79 | else: 80 | self.assertTrue(data.dtype.str[1:] == typ) 81 | # check data in variable. 82 | if data.dtype.str[1:] != 'S1': 83 | #assert np.allclose(datarr, ranarr[1:n1dim].astype(data.dtype)) 84 | assert_array_almost_equal(datarr,ranarr[1:n1dim].astype(data.dtype)) 85 | else: 86 | assert datarr.tobytes() == ranarr[1:n1dim].astype(data.dtype).tobytes() 87 | # check that variable elements not yet written are filled 88 | # with the specified _FillValue. 
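# Aside (hypothetical names, not part of this test): slots never written to
# hold the variable's _FillValue; with auto-masking off they read back as that
# raw fill value, e.g.
#   t = ds.createVariable("t", "f8", ("n2",), fill_value=-999.0)
#   t.set_auto_mask(False)
#   t[0]   # -999.0 until something is actually written there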
89 | assert_array_equal(datfilled,np.asarray(data._FillValue,datfilled.dtype)) 90 | # issue 271 (_FillValue should be a byte for character arrays on 91 | # Python 3) 92 | v = f.variables['issue271'] 93 | assert type(v._FillValue) == bytes 94 | assert v._FillValue == b'Z' 95 | # issue 273 (setting _FillValue to null byte manually) 96 | v2 = f.variables['issue273'] 97 | assert type(v2._FillValue) == bytes 98 | assert v2._FillValue == b'\x00' 99 | assert str(issue273_data) == str(v2[:]) 100 | # issue 707 (don't apply missing_value if cast to variable type is 101 | # unsafe) 102 | v3 = f.variables['issue707'] 103 | assert_array_equal(v3[:],-1*np.ones(n2dim,v3.dtype)) 104 | f.close() 105 | # issue #850 (masked scalar char variable) 106 | f = netCDF4.Dataset(self.file,'a') 107 | a = f.createVariable('a', 'c', ()) 108 | a[:] = np.ma.masked 109 | f.close() 110 | 111 | if __name__ == '__main__': 112 | unittest.main() 113 | -------------------------------------------------------------------------------- /.github/workflows/build_latest.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test Linux with latest netcdf-c 2 | on: [push, pull_request] 3 | jobs: 4 | build-linux: 5 | name: Python (${{ matrix.python-version }}) 6 | runs-on: ubuntu-latest 7 | env: 8 | PNETCDF_VERSION: 1.14.1 9 | NETCDF_VERSION: 4.9.3 10 | NETCDF_DIR: ${{ github.workspace }}/.. 11 | NETCDF_EXTRA_CONFIG: --enable-pnetcdf 12 | #CC: mpicc.mpich 13 | CC: mpicc 14 | #NO_NET: 1 15 | strategy: 16 | matrix: 17 | python-version: ["3.14"] 18 | steps: 19 | 20 | - uses: actions/checkout@v6 21 | with: 22 | submodules: true 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v6 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | - name: Install Ubuntu Dependencies 30 | run: | 31 | sudo apt-get update 32 | #sudo apt-get install mpich libmpich-dev libhdf5-mpich-dev openmpi-bin openmpi-common libopenmpi-dev libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev 33 | sudo apt-get install openmpi-common libopenmpi-dev openmpi-bin libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev 34 | echo "Download and build PnetCDF version ${PNETCDF_VERSION}" 35 | wget https://parallel-netcdf.github.io/Release/pnetcdf-${PNETCDF_VERSION}.tar.gz 36 | tar -xzf pnetcdf-${PNETCDF_VERSION}.tar.gz 37 | pushd pnetcdf-${PNETCDF_VERSION} 38 | ./configure --prefix $NETCDF_DIR --enable-shared --disable-fortran --disable-cxx 39 | make -j 2 40 | sudo make install 41 | popd 42 | echo "Download and build netCDF version ${NETCDF_VERSION}" 43 | wget https://downloads.unidata.ucar.edu/netcdf-c/${NETCDF_VERSION}/netcdf-c-${NETCDF_VERSION}.tar.gz 44 | tar -xzf netcdf-c-${NETCDF_VERSION}.tar.gz 45 | pushd netcdf-c-${NETCDF_VERSION} 46 | #export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include" 47 | export CPPFLAGS="-I/usr/include/hdf5/openmpi -I${NETCDF_DIR}/include" 48 | export LDFLAGS="-L${NETCDF_DIR}/lib" 49 | #export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz" 50 | export LIBS="-lhdf5_openmpi_hl -lhdf5_openmpi -lm -lz" 51 | which $CC 52 | ./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --enable-dap --enable-parallel4 $NETCDF_EXTRA_CONFIG 53 | make -j 2 54 | sudo make install 55 | popd 56 | 57 | # - name: The job has failed 58 | # if: ${{ failure() }} 59 | # run: | 60 | # cd netcdf-c-${NETCDF_VERSION} 61 | # cat config.log 62 | 63 | - name: Install python dependencies via 
pip 64 | run: | 65 | python -m pip install --upgrade pip 66 | python -m pip install numpy cython cftime pytest twine wheel check-manifest mpi4py typing-extensions 67 | 68 | - name: Install netcdf4-python 69 | run: | 70 | export PATH=${NETCDF_DIR}/bin:${PATH} 71 | export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c-${NETCDF_VERSION}/plugins/plugindir 72 | python -m pip install . --no-build-isolation 73 | 74 | - name: Test 75 | run: | 76 | export PATH=${NETCDF_DIR}/bin:${PATH} 77 | python checkversion.py 78 | # serial 79 | cd test 80 | python run_all.py 81 | # parallel (hdf5 for netcdf4, pnetcdf for netcdf3) 82 | cd ../examples 83 | #mpirun.mpich -np 4 python mpi_example.py 84 | mpirun -np 4 --oversubscribe python mpi_example.py 85 | if [ $? -ne 0 ] ; then 86 | echo "hdf5 mpi test failed!" 87 | exit 1 88 | else 89 | echo "hdf5 mpi test passed!" 90 | fi 91 | #mpirun.mpich -np 4 python mpi_example_compressed.py 92 | mpirun -np 4 --oversubscribe python mpi_example_compressed.py 93 | if [ $? -ne 0 ] ; then 94 | echo "hdf5 compressed mpi test failed!" 95 | exit 1 96 | else 97 | echo "hdf5 compressed mpi test passed!" 98 | fi 99 | #mpirun.mpich -np 4 python mpi_example.py NETCDF3_64BIT_DATA 100 | mpirun -np 4 --oversubscribe python mpi_example.py NETCDF3_64BIT_DATA 101 | if [ $? -ne 0 ] ; then 102 | echo "pnetcdf mpi test failed!" 103 | exit 1 104 | else 105 | echo "pnetcdf mpi test passed!" 106 | fi 107 | 108 | # - name: Tarball 109 | # run: | 110 | # export PATH=${NETCDF_DIR}/bin:${PATH} 111 | # python setup.py --version 112 | # check-manifest --version 113 | # check-manifest --verbose 114 | # pip wheel . -w dist --no-deps 115 | # twine check dist/* 116 | -------------------------------------------------------------------------------- /.github/workflows/build_old.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test Linux with older netcdf-c 2 | on: [push, pull_request] 3 | jobs: 4 | build-linux: 5 | name: Python (${{ matrix.python-version }}) 6 | runs-on: ubuntu-latest 7 | env: 8 | PNETCDF_VERSION: 1.12.1 9 | NETCDF_VERSION: 4.7.4 10 | NETCDF_DIR: ${{ github.workspace }}/.. 
11 | NETCDF_EXTRA_CONFIG: --enable-pnetcdf 12 | #CC: mpicc.mpich 13 | CC: mpicc 14 | #NO_NET: 1 15 | strategy: 16 | matrix: 17 | python-version: ["3.14"] 18 | steps: 19 | 20 | - uses: actions/checkout@v6 21 | with: 22 | submodules: true 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v6 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | - name: Install Ubuntu Dependencies 30 | run: | 31 | sudo apt-get update 32 | #sudo apt-get install mpich libmpich-dev libhdf5-mpich-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev 33 | sudo apt-get install openmpi-common libopenmpi-dev openmpi-bin libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev 34 | echo "Download and build PnetCDF version ${PNETCDF_VERSION}" 35 | wget https://parallel-netcdf.github.io/Release/pnetcdf-${PNETCDF_VERSION}.tar.gz 36 | tar -xzf pnetcdf-${PNETCDF_VERSION}.tar.gz 37 | pushd pnetcdf-${PNETCDF_VERSION} 38 | ./configure --prefix $NETCDF_DIR --enable-shared --disable-fortran --disable-cxx 39 | make -j 2 40 | sudo make install 41 | popd 42 | echo "Download and build netCDF version ${NETCDF_VERSION}" 43 | #wget https://downloads.unidata.ucar.edu/netcdf-c/${NETCDF_VERSION}/netcdf-c-${NETCDF_VERSION}.tar.gz 44 | wget https://www.gfd-dennou.org/arch/netcdf/unidata-mirror/netcdf-c-${NETCDF_VERSION}.tar.gz 45 | tar -xzf netcdf-c-${NETCDF_VERSION}.tar.gz 46 | pushd netcdf-c-${NETCDF_VERSION} 47 | #export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include" 48 | export CPPFLAGS="-I/usr/include/hdf5/openmpi -I${NETCDF_DIR}/include" 49 | export LDFLAGS="-L${NETCDF_DIR}/lib" 50 | #export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz" 51 | export LIBS="-lhdf5_openmpi_hl -lhdf5_openmpi -lm -lz" 52 | ./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --enable-dap --enable-parallel4 $NETCDF_EXTRA_CONFIG 53 | make -j 2 54 | sudo make install 55 | popd 56 | 57 | # - name: The job has failed 58 | # if: ${{ failure() }} 59 | # run: | 60 | # cd netcdf-c-${NETCDF_VERSION} 61 | # cat config.log 62 | 63 | - name: Install python dependencies via pip 64 | run: | 65 | python -m pip install --upgrade pip 66 | python -m pip install numpy cython cftime pytest twine wheel check-manifest mpi4py typing-extensions 67 | 68 | - name: Install netcdf4-python 69 | run: | 70 | export PATH=${NETCDF_DIR}/bin:${PATH} 71 | export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c-${NETCDF_VERSION}/plugins/plugindir 72 | python -m pip install . --no-build-isolation 73 | 74 | - name: Test 75 | run: | 76 | export PATH=${NETCDF_DIR}/bin:${PATH} 77 | python checkversion.py 78 | # serial 79 | cd test 80 | python run_all.py 81 | # parallel (hdf5 for netcdf4, pnetcdf for netcdf3) 82 | cd ../examples 83 | #mpirun.mpich -np 4 python mpi_example.py 84 | mpirun -np 4 --oversubscribe python mpi_example.py 85 | if [ $? -ne 0 ] ; then 86 | echo "hdf5 mpi test failed!" 87 | exit 1 88 | else 89 | echo "hdf5 mpi test passed!" 90 | fi 91 | #mpirun.mpich -np 4 python mpi_example_compressed.py 92 | mpirun -np 4 --oversubscribe python mpi_example_compressed.py 93 | if [ $? -ne 0 ] ; then 94 | echo "hdf5 compressed mpi test failed!" 95 | exit 1 96 | else 97 | echo "hdf5 compressed mpi test passed!" 98 | fi 99 | #mpirun.mpich -np 4 python mpi_example.py NETCDF3_64BIT_DATA 100 | mpirun -np 4 --oversubscribe python mpi_example.py NETCDF3_64BIT_DATA 101 | if [ $? -ne 0 ] ; then 102 | echo "pnetcdf mpi test failed!" 
103 | exit 1 104 | else 105 | echo "pnetcdf mpi test passed!" 106 | fi 107 | 108 | # - name: Tarball 109 | # run: | 110 | # export PATH=${NETCDF_DIR}/bin:${PATH} 111 | # python setup.py --version 112 | # check-manifest --version 113 | # check-manifest --verbose 114 | # pip wheel . -w dist --no-deps 115 | # twine check dist/* 116 | -------------------------------------------------------------------------------- /test/test_stringarr.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset, stringtochar, chartostring 2 | import random, numpy, string 3 | import unittest 4 | import os 5 | from numpy.testing import assert_array_equal, assert_array_almost_equal 6 | import numpy as np 7 | 8 | def generateString(length, alphabet=string.ascii_letters + string.digits + string.punctuation): 9 | return(''.join([random.choice(alphabet) for i in range(length)])) 10 | 11 | # test conversion of arrays of fixed-length strings 12 | # to arrays of characters (with an extra dimension), and vice-versa. 13 | 14 | FILE_NAME = 'tst_stringarr.nc' 15 | FILE_FORMAT = 'NETCDF4_CLASSIC' 16 | n2 = 20; nchar = 12; nrecs = 4 17 | data = numpy.empty((nrecs,n2),'S'+repr(nchar)) 18 | for nrec in range(nrecs): 19 | for n in range(n2): 20 | data[nrec,n] = generateString(nchar) 21 | datau = data.astype('U') 22 | datac = stringtochar(data, encoding='ascii') 23 | 24 | nx, n_strlen = 3, 12 25 | unicode_strings = np.array(['Münster', 'Liége', '東京'],dtype='U'+str(n_strlen)) 26 | unicode_strings2 = np.array(['Münster', 'Москва', '東京'],dtype='U'+str(n_strlen)) 27 | unicode_strings2_bytes = [b'M', b'\xc3', b'\xbc', b'n', b's', b't', b'e', b'r', b'\xd0', b'\x9c', b'\xd0', b'\xbe', b'\xd1', b'\x81', b'\xd0', b'\xba', b'\xd0', b'\xb2', b'\xd0', b'\xb0', b'\xe6', b'\x9d', b'\xb1', b'\xe4', b'\xba', b'\xac'] 28 | 29 | class StringArrayTestCase(unittest.TestCase): 30 | 31 | def setUp(self): 32 | self.file = FILE_NAME 33 | nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT) # type: ignore # FILE_FORMAT 34 | nc.createDimension('n1',None) 35 | nc.createDimension('n2',n2) 36 | nc.createDimension('nchar',nchar) 37 | nc.createDimension("x", nx) 38 | nc.createDimension("nstr", n_strlen) 39 | v = nc.createVariable('strings','S1',('n1','n2','nchar')) 40 | v2 = nc.createVariable('strings2','S1',('n1','n2','nchar')) 41 | # if _Encoding set, string array should automatically be converted 42 | # to a char array and vice-versan 43 | v2._Encoding = 'ascii' 44 | v3 = nc.createVariable('strings3','S1',('n1','n2','nchar')) 45 | v3._Encoding = 'ascii' 46 | for nrec in range(nrecs): 47 | datac = stringtochar(data,encoding='ascii') 48 | v[nrec] = datac[nrec] 49 | v2[:-1] = data[:-1] 50 | v2[-1] = data[-1] 51 | v2[-1,-1] = data[-1,-1] # write single element 52 | v2[-1,-1] = data[-1,-1].tobytes() # write single python string 53 | # _Encoding should be ignored if an array of characters is specified 54 | v3[:] = stringtochar(data, encoding='ascii') 55 | # test unicode strings (issue #1440) 56 | v4 = nc.createVariable("strings4", "S1", dimensions=("x", "nstr",)) 57 | v4._Encoding = "UTF-8" 58 | v4[:] = unicode_strings 59 | v4[1] = "Москва" 60 | nc.close() 61 | 62 | def tearDown(self): 63 | # Remove the temporary files 64 | os.remove(self.file) 65 | 66 | def runTest(self): 67 | """testing functions for converting arrays of chars to fixed-len strings""" 68 | nc = Dataset(FILE_NAME) 69 | assert nc.dimensions['n1'].isunlimited() == True 70 | v = nc.variables['strings'] 71 | v2 = nc.variables['strings2'] 72 | v3 = 
nc.variables['strings3'] 73 | v4 = nc.variables['strings4'] 74 | assert np.all(v4[:]==unicode_strings2) 75 | v4.set_auto_chartostring(False) 76 | assert (v4[:].compressed().tolist() == unicode_strings2_bytes) 77 | assert v.dtype.str[1:] in ['S1','U1'] 78 | assert v.shape == (nrecs,n2,nchar) 79 | for nrec in range(nrecs): 80 | data2 = chartostring(v[nrec],encoding='ascii') 81 | assert_array_equal(data2,datau[nrec]) 82 | data2 = v2[:] 83 | data2[0] = v2[0] 84 | data2[0,1] = v2[0,1] 85 | assert_array_equal(data2,datau) 86 | data3 = v3[:] 87 | assert_array_equal(data3,datau) 88 | # these slices should return a char array, not a string array 89 | data4 = v2[:,:,0] 90 | assert data4.dtype.itemsize == 1 91 | assert_array_equal(data4, datac[:,:,0]) 92 | data5 = v2[0,0:nchar,0] 93 | assert data5.dtype.itemsize == 1 94 | assert_array_equal(data5, datac[0,0:nchar,0]) 95 | # test turning auto-conversion off. 96 | v2.set_auto_chartostring(False) 97 | data6 = v2[:] 98 | assert data6.dtype.itemsize == 1 99 | assert_array_equal(data6, datac) 100 | nc.set_auto_chartostring(False) 101 | data7 = v3[:] 102 | assert data7.dtype.itemsize == 1 103 | assert_array_equal(data7, datac) 104 | nc.close() 105 | 106 | if __name__ == '__main__': 107 | unittest.main() 108 | -------------------------------------------------------------------------------- /.github/workflows/cibuildwheel.yml: -------------------------------------------------------------------------------- 1 | name: Wheels 2 | 3 | on: 4 | pull_request: 5 | push: 6 | tags: 7 | - "v*" 8 | release: 9 | types: 10 | - published 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | 17 | build_sdist: 18 | name: Build source distribution 19 | runs-on: ubuntu-22.04 20 | steps: 21 | - uses: actions/checkout@v6 22 | with: 23 | fetch-depth: 0 24 | 25 | - uses: actions/setup-python@v6 26 | name: Install Python 27 | with: 28 | python-version: 3.x 29 | 30 | - name: Install APT packages 31 | if: contains(${{ matrix.os }}, 'ubuntu') 32 | run: | 33 | sudo apt update 34 | sudo apt install libhdf5-dev libnetcdf-dev 35 | 36 | - name: Build sdist 37 | run: > 38 | pip install build 39 | && python -m build --sdist . --outdir dist 40 | 41 | - uses: actions/upload-artifact@v6 42 | with: 43 | name: pypi-artifacts 44 | path: ${{ github.workspace }}/dist/*.tar.gz 45 | 46 | 47 | build_bdist: 48 | name: "Build ${{ matrix.os }} (${{ matrix.arch }}) wheels" 49 | runs-on: ${{ matrix.os }} 50 | # Prevent hanging when building from emulation like aarch64. 51 | timeout-minutes: 300 52 | strategy: 53 | fail-fast: false 54 | matrix: 55 | include: 56 | - os: ubuntu-22.04 57 | arch: x86_64 58 | - os: ubuntu-22.04 59 | arch: aarch64 60 | 61 | steps: 62 | - uses: actions/checkout@v6 63 | with: 64 | fetch-depth: 0 65 | 66 | # For aarch64 support 67 | # https://cibuildwheel.pypa.io/en/stable/faq/#emulation 68 | - uses: docker/setup-qemu-action@v3 69 | with: 70 | platforms: all 71 | if: runner.os == 'Linux' && matrix.arch == 'aarch64' 72 | 73 | - name: Build oldest and newest Python 74 | shell: bash 75 | # On PRs we run only oldest and newest Python versions to reduce CI load. 76 | # Skips pypy and musllinux everywhere. 77 | # We are building 310, 311 and 314 for now. 78 | # (3.11 is the oldest version for which we support abi3 wheels) 79 | # These needs to rotate every new Python release. 
80 | run: | 81 | set -x 82 | echo "CIBW_BUILD=cp310-* cp311-* cp314-*" >> $GITHUB_ENV 83 | set +x 84 | 85 | if: ${{ github.event_name }} == "pull_request" 86 | 87 | - name: "Building ${{ matrix.os }} (${{ matrix.arch }}) wheels" 88 | uses: pypa/cibuildwheel@v3.3.0 89 | env: 90 | CIBW_ARCHS: ${{ matrix.arch }} 91 | 92 | - uses: actions/upload-artifact@v6 93 | with: 94 | name: pypi-artifacts-${{ matrix.os }}-${{ matrix.arch }} 95 | path: ${{ github.workspace }}/wheelhouse/*.whl 96 | 97 | 98 | build_wheels_winmac: 99 | name: Build wheels for ${{matrix.arch}} on ${{ matrix.os }} 100 | runs-on: ${{ matrix.os }} 101 | strategy: 102 | fail-fast: false 103 | matrix: 104 | include: 105 | - os: windows-latest 106 | arch: AMD64 107 | - os: macos-14 108 | arch: arm64 109 | - os: macos-15-intel 110 | arch: x86_64 111 | 112 | steps: 113 | - uses: actions/checkout@v6 114 | with: 115 | fetch-depth: 0 116 | 117 | - uses: actions/setup-python@v6 118 | name: Install Python 119 | with: 120 | python-version: 3.x 121 | 122 | - name: Setup Micromamba Python ${{ matrix.python-version }} 123 | uses: mamba-org/setup-micromamba@v2 124 | with: 125 | environment-name: build 126 | init-shell: bash 127 | create-args: >- 128 | python=${{ matrix.python-version }} libnetcdf=4.9.3 --channel conda-forge 129 | 130 | - name: Build wheels for Windows/Mac 131 | uses: pypa/cibuildwheel@v3.3.0 132 | env: 133 | CIBW_ARCHS: ${{ matrix.arch }} 134 | 135 | - uses: actions/upload-artifact@v6 136 | with: 137 | name: pypi-artifacts-${{ matrix.os }}-${{ matrix.arch }} 138 | path: ${{ github.workspace }}/wheelhouse/*.whl 139 | 140 | 141 | show-artifacts: 142 | needs: [build_bdist, build_sdist, build_wheels_winmac] 143 | name: "Show artifacts" 144 | runs-on: ubuntu-22.04 145 | steps: 146 | - uses: actions/download-artifact@v7 147 | with: 148 | pattern: pypi-artifacts* 149 | path: ${{ github.workspace }}/dist 150 | merge-multiple: true 151 | 152 | - shell: bash 153 | run: | 154 | ls -lh ${{ github.workspace }}/dist 155 | 156 | 157 | publish-artifacts-pypi: 158 | needs: [build_bdist, build_sdist, build_wheels_winmac] 159 | name: "Publish to PyPI" 160 | runs-on: ubuntu-22.04 161 | # upload to PyPI for every tag starting with 'v' 162 | if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') 163 | steps: 164 | - uses: actions/download-artifact@v7 165 | with: 166 | pattern: pypi-artifacts* 167 | path: ${{ github.workspace }}/dist 168 | merge-multiple: true 169 | 170 | - uses: pypa/gh-action-pypi-publish@release/v1 171 | with: 172 | user: __token__ 173 | password: ${{ secrets.PYPI_PASSWORD }} 174 | print_hash: true 175 | -------------------------------------------------------------------------------- /test/test_multifile2.py: -------------------------------------------------------------------------------- 1 | from netCDF4 import Dataset, MFDataset, MFTime 2 | import numpy as np 3 | from numpy.random import seed, randint 4 | from numpy.testing import assert_array_equal, assert_equal 5 | from numpy import ma 6 | import tempfile, unittest, os, datetime 7 | import cftime 8 | from packaging.version import Version 9 | 10 | nx=100; ydim=5; zdim=10 11 | nfiles = 10 12 | ninc = nx/nfiles 13 | files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)] 14 | data = randint(0,10,size=(nx,ydim,zdim)) 15 | missval = 99 16 | data[::10] = missval 17 | data = ma.masked_values(data,missval) 18 | 19 | class VariablesTestCase(unittest.TestCase): 20 | 21 | def setUp(self): 22 | self.files = files 23 | for 
nfile,file in enumerate(self.files): 24 | f = Dataset(file,'w',format='NETCDF4_CLASSIC') 25 | #f.createDimension('x',None) 26 | f.createDimension('x',int(ninc)) 27 | f.createDimension('y',ydim) 28 | f.createDimension('z',zdim) 29 | f.history = 'created today' 30 | x = f.createVariable('x','i',('x',)) 31 | x.units = 'zlotys' 32 | dat = f.createVariable('data','i',('x','y','z',)) 33 | dat.long_name = 'phony data' 34 | dat.missing_value = missval 35 | nx1 = int(nfile*ninc); nx2 = int(ninc*(nfile+1)) 36 | #x[0:ninc] = np.arange(nfile*ninc,ninc*(nfile+1)) 37 | x[:] = np.arange(nfile*ninc,ninc*(nfile+1)) 38 | #dat[0:ninc] = data[nx1:nx2] 39 | dat[:] = data[nx1:nx2] 40 | f.close() 41 | 42 | def tearDown(self): 43 | # Remove the temporary files 44 | for file in self.files: 45 | os.remove(file) 46 | 47 | def runTest(self): 48 | """testing multi-file dataset access""" 49 | # specify the aggregation dim (not necessarily unlimited) 50 | f = MFDataset(self.files,aggdim='x',check=True) 51 | assert f.history == 'created today' 52 | assert_array_equal(np.arange(0,nx),f.variables['x'][:]) 53 | varin = f.variables['data'] 54 | datin = varin[:] 55 | assert isinstance(data, np.ma.masked_array) 56 | assert_array_equal(datin.mask,data.mask) 57 | varin.set_auto_maskandscale(False) 58 | data2 = data.filled() 59 | assert varin.long_name == 'phony data' 60 | assert len(varin) == nx 61 | assert varin.shape == (nx,ydim,zdim) 62 | assert varin.dimensions == ('x','y','z') 63 | assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8]) 64 | assert varin[0,0,0] == data2[0,0,0] 65 | assert_array_equal(varin[:],data2) 66 | assert getattr(varin,'nonexistantatt',None) == None 67 | f.close() 68 | 69 | 70 | class NonuniformTimeTestCase(unittest.TestCase): 71 | ninc = 365 72 | def setUp(self): 73 | 74 | self.files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(2)] 75 | for nfile,file in enumerate(self.files): 76 | f = Dataset(file,'w',format='NETCDF4_CLASSIC') 77 | f.createDimension('time',None) 78 | f.createDimension('y',ydim) 79 | f.createDimension('z',zdim) 80 | f.history = 'created today' 81 | 82 | time = f.createVariable('time', 'f', ('time', )) 83 | #time.units = 'days since {0}-01-01'.format(1979+nfile) 84 | yr = 1979+nfile 85 | time.units = 'days since %s-01-01' % yr 86 | 87 | time.calendar = 'standard' 88 | 89 | x = f.createVariable('x','f',('time', 'y', 'z')) 90 | x.units = 'potatoes per square mile' 91 | 92 | nx1 = self.ninc*nfile; 93 | nx2 = self.ninc*(nfile+1) 94 | 95 | time[:] = np.arange(self.ninc) 96 | x[:] = np.arange(nx1, nx2).reshape(self.ninc,1,1) * np.ones((1, ydim, zdim)) 97 | 98 | f.close() 99 | 100 | def tearDown(self): 101 | # Remove the temporary files 102 | for file in self.files: 103 | os.remove(file) 104 | 105 | 106 | def runTest(self): 107 | # Get the real dates 108 | # skip this until cftime pull request #55 is in a released 109 | # version (1.0.1?). 
Otherwise, fix for issue #808 breaks this 110 | dates = [] 111 | if Version(cftime.__version__) >= Version('1.0.1'): 112 | for file in self.files: 113 | f = Dataset(file) 114 | t = f.variables['time'] 115 | dates.extend(cftime.num2date(t[:], t.units, t.calendar)) 116 | f.close() 117 | 118 | # Compare with the MF dates 119 | f = MFDataset(self.files,check=True) 120 | t = f.variables['time'] 121 | mfdates = cftime.num2date(t[:], t.units, t.calendar) 122 | 123 | T = MFTime(t) 124 | assert_equal(len(T), len(t)) 125 | assert_equal(T.shape, t.shape) 126 | assert_equal(T.dimensions, t.dimensions) 127 | assert_equal(T.typecode(), t.typecode()) 128 | # skip this until cftime pull request #55 is in a released 129 | # version (1.0.1?). Otherwise, fix for issue #808 breaks this 130 | if Version(cftime.__version__) >= Version('1.0.1'): 131 | assert_array_equal(cftime.num2date(T[:], T.units, T.calendar), dates) 132 | assert_equal(cftime.date2index(datetime.datetime(1980, 1, 2), T), 366) 133 | f.close() 134 | 135 | if __name__ == '__main__': 136 | unittest.main() 137 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "Cython>=0.29", 4 | "numpy>=2.0.0", 5 | "setuptools>=77.0.1", 6 | "setuptools_scm[toml]>=3.4", 7 | ] 8 | build-backend = "setuptools.build_meta" 9 | 10 | [project] 11 | name = "netCDF4" 12 | description = "Provides an object-oriented python interface to the netCDF version 4 library" 13 | authors = [ 14 | {name = "Jeff Whitaker", email = "whitaker.jeffrey@gmail.com"}, 15 | ] 16 | requires-python = ">=3.10" 17 | keywords = [ 18 | "numpy", "netcdf", "data", "science", "network", "oceanography", 19 | "meteorology", "climate", 20 | ] 21 | license = "MIT" 22 | license-files = ["LICENSE"] 23 | classifiers = [ 24 | "Development Status :: 3 - Alpha", 25 | "Programming Language :: Python :: 3", 26 | "Programming Language :: Python :: 3.10", 27 | "Programming Language :: Python :: 3.11", 28 | "Programming Language :: Python :: 3.12", 29 | "Programming Language :: Python :: 3.13", 30 | "Programming Language :: Python :: 3.14", 31 | "Intended Audience :: Science/Research", 32 | "Topic :: Software Development :: Libraries :: Python Modules", 33 | "Topic :: System :: Archiving :: Compression", 34 | "Operating System :: OS Independent", 35 | ] 36 | dependencies = [ 37 | "cftime", 38 | "certifi", 39 | "numpy", 40 | ] 41 | dynamic = ["version"] 42 | 43 | [project.optional-dependencies] 44 | tests = [ 45 | "Cython", 46 | "packaging", 47 | "pytest", 48 | "typing-extensions>=4.15.0", 49 | ] 50 | parallel = [ 51 | "mpi4py", 52 | ] 53 | 54 | [project.readme] 55 | text = """\ 56 | netCDF version 4 has many features not found in earlier versions of the library, 57 | such as hierarchical groups, zlib compression, multiple unlimited dimensions, 58 | and new data types. It is implemented on top of HDF5. This module implements 59 | most of the new features, and can read and write netCDF files compatible with 60 | older versions of the library. The API is modelled after Scientific.IO.NetCDF, 61 | and should be familiar to users of that module. 
62 | """ 63 | content-type = "text/x-rst" 64 | 65 | [project.scripts] 66 | nc3tonc4 = "netCDF4.utils:nc3tonc4" 67 | nc4tonc3 = "netCDF4.utils:nc4tonc3" 68 | ncinfo = "netCDF4.utils:ncinfo" 69 | 70 | [project.urls] 71 | Documentation = "https://unidata.github.io/netcdf4-python/" 72 | Repository = "https://github.com/Unidata/netcdf4-python" 73 | 74 | [tool.setuptools.packages.find] 75 | where = ["src"] 76 | 77 | [tool.setuptools.package-data] 78 | "netCDF4.plugins" = ["*__nc*"] 79 | 80 | [tool.setuptools_scm] 81 | 82 | [tool.pytest.ini_options] 83 | pythonpath = ["test"] 84 | filterwarnings = [ 85 | "error", 86 | "ignore::UserWarning", 87 | ] 88 | 89 | [tool.mypy] 90 | files = ["src/netCDF4"] 91 | exclude = "utils.py" 92 | check_untyped_defs = true 93 | allow_redefinition = true 94 | # next 2 lines workarounds for mypy dealing with type_guards.py 95 | mypy_path = "test" 96 | explicit_package_bases = true 97 | 98 | [[tool.mypy.overrides]] 99 | ignore_missing_imports = true 100 | module = [ 101 | "cftime.*", 102 | "cython.*", 103 | "filter_availability", 104 | "matplotlib.*" 105 | ] 106 | 107 | [tool.cibuildwheel] 108 | build-verbosity = 1 109 | build-frontend = "build" 110 | skip = [ 111 | "*-musllinux*", 112 | "cp314t-*", 113 | ] 114 | test-extras = "tests" 115 | test-sources = [ 116 | "test", 117 | "pyproject.toml" 118 | ] 119 | test-command = [ 120 | '''python -c "import netCDF4; print(f'netCDF4 v{netCDF4.__version__}')"''', 121 | "pytest -s -rxs -v test", 122 | ] 123 | manylinux-x86_64-image = "ghcr.io/ocefpaf/manylinux_2_28_x86_64-netcdf" 124 | manylinux-aarch64-image = "ghcr.io/ocefpaf/manylinux_2_28_aarch64-netcdf" 125 | environment = {NETCDF4_LIMITED_API="1"} 126 | 127 | [tool.cibuildwheel.macos] 128 | # https://cibuildwheel.pypa.io/en/stable/faq/#macos-passing-dyld_library_path-to-delocate 129 | repair-wheel-command = """\ 130 | DYLD_FALLBACK_LIBRARY_PATH=/Users/runner/micromamba/envs/build/lib \ 131 | delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} \ 132 | """ 133 | 134 | [tool.cibuildwheel.windows] 135 | before-build = "python -m pip install delvewheel" 136 | repair-wheel-command = [ 137 | "delvewheel show --include blosc.dll;zstd.dll;lz4.dll {wheel}", 138 | "delvewheel repair --include blosc.dll;zstd.dll;lz4.dll -w {dest_dir} {wheel}", 139 | ] 140 | 141 | [[tool.cibuildwheel.overrides]] 142 | select = "*linux*" 143 | environment = {NETCDF_PLUGIN_DIR="/usr/local/hdf5/lib/plugin/"} 144 | 145 | [[tool.cibuildwheel.overrides]] 146 | select = "*-macosx_x86_64" 147 | inherit.environment = "append" 148 | environment = {MACOSX_DEPLOYMENT_TARGET="13.0",HDF5_DIR="/Users/runner/micromamba/envs/build",netCDF4_DIR="/Users/runner/micromamba/envs/build",PATH="${PATH}:/Users/runner/micromamba/envs/build/bin",NETCDF_PLUGIN_DIR="/Users/runner/micromamba/envs/build/hdf5/lib/plugin"} 149 | 150 | [[tool.cibuildwheel.overrides]] 151 | select = "*-macosx_arm64" 152 | inherit.environment = "append" 153 | environment = {MACOSX_DEPLOYMENT_TARGET="14.0",HDF5_DIR="/Users/runner/micromambe/envs/build",netCDF4_DIR="/Users/runner/micromambe/envs/build",PATH="${PATH}:/Users/runner/micromamba/envs/build/bin",NETCDF_PLUGIN_DIR="/Users/runner/micromamba/envs/build/hdf5/lib/plugin"} 154 | 155 | [[tool.cibuildwheel.overrides]] 156 | select = "*-win_*" 157 | inherit.environment = "append" 158 | environment = 
{HDF5_DIR='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library',netCDF4_DIR='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library',PATH='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library\\bin;${PATH}',NETCDF_PLUGIN_DIR='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library\\hdf5\\lib\\plugin'} 159 | -------------------------------------------------------------------------------- /test/test_rename.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | import netCDF4 6 | from netCDF4 import __has_rename_grp__ 7 | 8 | # test changing dimension, variable names 9 | # and deleting attributes. 10 | 11 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 12 | LAT_NAME="lat" 13 | LON_NAME="lon" 14 | LON_NAME2 = "longitude" 15 | LEVEL_NAME="level" 16 | TIME_NAME="time" 17 | VAR_NAME='temp' 18 | VAR_NAME2='wind' 19 | GROUP_NAME='subgroup' 20 | GROUP_NAME2='subgroup2' 21 | 22 | class VariablesTestCase(unittest.TestCase): 23 | 24 | def setUp(self): 25 | self.file = FILE_NAME 26 | f = netCDF4.Dataset(self.file, 'w') 27 | f.createDimension(LAT_NAME,73) 28 | f.createDimension(LON_NAME,145) 29 | f.createDimension(LEVEL_NAME,10) 30 | f.createDimension(TIME_NAME,None) 31 | if __has_rename_grp__: 32 | g = f.createGroup(GROUP_NAME) 33 | else: 34 | g = f.createGroup(GROUP_NAME2) 35 | g.createDimension(LAT_NAME,145) 36 | g.createDimension(LON_NAME,289) 37 | g.createDimension(LEVEL_NAME,20) 38 | g.createDimension(TIME_NAME,None) 39 | f.foo = 'bar' 40 | f.goober = 2 41 | g.foo = 'bar' 42 | g.goober = 2 43 | f.createVariable(VAR_NAME,'f4',(LAT_NAME, LON_NAME, TIME_NAME)) 44 | v = f.variables[VAR_NAME] 45 | v.bar = 'foo' 46 | v.slobber = 3 47 | g.createVariable(VAR_NAME,'f4',(LAT_NAME, LON_NAME, TIME_NAME)) 48 | v2 = g.variables[VAR_NAME] 49 | v2.bar = 'foo' 50 | v2.slobber = 3 51 | f.close() 52 | 53 | def tearDown(self): 54 | # Remove the temporary files 55 | os.remove(self.file) 56 | 57 | def runTest(self): 58 | """testing renaming of dimensions, variables and attribute deletion""" 59 | f = netCDF4.Dataset(self.file, 'r+') 60 | v = f.variables[VAR_NAME] 61 | names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME] 62 | # check that dimension names are correct. 63 | for name in f.dimensions.keys(): 64 | self.assertTrue(name in names_check) 65 | names_check = [VAR_NAME] 66 | # check that variable names are correct. 67 | for name in f.variables.keys(): 68 | self.assertTrue(name in names_check) 69 | # rename dimension. 70 | f.renameDimension(LON_NAME,LON_NAME2) 71 | # rename variable. 72 | f.renameVariable(VAR_NAME,VAR_NAME2) 73 | # rename group. 74 | if __has_rename_grp__: 75 | f.renameGroup(GROUP_NAME,GROUP_NAME2) 76 | # check that new dimension names are correct. 77 | names_check = [LAT_NAME, LON_NAME2, LEVEL_NAME, TIME_NAME] 78 | for name in f.dimensions.keys(): 79 | self.assertTrue(name in names_check) 80 | names_check = [VAR_NAME2] 81 | # check that new variable names are correct. 82 | for name in f.variables.keys(): 83 | self.assertTrue(name in names_check) 84 | g = f.groups[GROUP_NAME2] 85 | vg = g.variables[VAR_NAME] 86 | names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME] 87 | # check that dimension names are correct. 88 | for name in g.dimensions.keys(): 89 | self.assertTrue(name in names_check) 90 | names_check = [VAR_NAME] 91 | # check that variable names are correct. 
92 | for name in g.variables.keys(): 93 | self.assertTrue(name in names_check) 94 | # check that group name is correct. 95 | self.assertTrue(GROUP_NAME not in f.groups and GROUP_NAME2 in f.groups) 96 | # rename dimension. 97 | g.renameDimension(LON_NAME,LON_NAME2) 98 | # rename variable. 99 | g.renameVariable(VAR_NAME,VAR_NAME2) 100 | # check that new dimension names are correct. 101 | names_check = [LAT_NAME, LON_NAME2, LEVEL_NAME, TIME_NAME] 102 | for name in g.dimensions.keys(): 103 | self.assertTrue(name in names_check) 104 | names_check = [VAR_NAME2] 105 | # check that new variable names are correct. 106 | for name in g.variables.keys(): 107 | self.assertTrue(name in names_check) 108 | # delete a global attribute. 109 | atts = f.ncattrs() 110 | del f.goober 111 | atts.remove('goober') 112 | self.assertTrue(atts == f.ncattrs()) 113 | atts = g.ncattrs() 114 | del g.goober 115 | atts.remove('goober') 116 | self.assertTrue(atts == g.ncattrs()) 117 | # delete a variable attribute. 118 | atts = v.ncattrs() 119 | del v.slobber 120 | atts.remove('slobber') 121 | self.assertTrue(atts == v.ncattrs()) 122 | atts = vg.ncattrs() 123 | del vg.slobber 124 | atts.remove('slobber') 125 | self.assertTrue(atts == vg.ncattrs()) 126 | f.close() 127 | # make sure attributes cannot be deleted, or vars/dims renamed 128 | # when file is open read-only. 129 | f = netCDF4.Dataset(self.file) 130 | v = f.variables[VAR_NAME2] 131 | self.assertRaises(RuntimeError, delattr, v, 'bar') 132 | self.assertRaises(RuntimeError, f.renameVariable, VAR_NAME2, VAR_NAME) 133 | self.assertRaises(RuntimeError, f.renameDimension, LON_NAME2, LON_NAME) 134 | g = f.groups[GROUP_NAME2] 135 | vg = g.variables[VAR_NAME2] 136 | self.assertRaises(RuntimeError, delattr, vg, 'bar') 137 | self.assertRaises(RuntimeError, g.renameVariable, VAR_NAME2, VAR_NAME) 138 | self.assertRaises(RuntimeError, g.renameDimension, LON_NAME2, LON_NAME) 139 | f.close() 140 | 141 | if __name__ == '__main__': 142 | unittest.main() 143 | -------------------------------------------------------------------------------- /test/test_compression_quant.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Any 2 | from numpy.random.mtrand import uniform 3 | from netCDF4 import Dataset, __has_quantization_support__ 4 | from numpy.testing import assert_almost_equal 5 | import numpy as np 6 | import os, tempfile, unittest 7 | if TYPE_CHECKING: 8 | from netCDF4 import CompressionLevel, QuantizeMode 9 | else: 10 | CompressionLevel = Any 11 | QuantizeMode = Any 12 | 13 | ndim = 100000 14 | nfiles = 7 15 | files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)] 16 | data_array = uniform(size=(ndim,)) 17 | nsd = 3 18 | nsb = 10 # for BitRound, use significant bits (~3.32 sig digits) 19 | complevel = 6 20 | 21 | def write_netcdf(filename,zlib,significant_digits,data,dtype='f8',shuffle=False,\ 22 | complevel: CompressionLevel = 6, quantize_mode: QuantizeMode = "BitGroom"): 23 | file = Dataset(filename,'w') 24 | file.createDimension('n', ndim) 25 | foo = file.createVariable('data',\ 26 | dtype,('n'),zlib=zlib,significant_digits=significant_digits,\ 27 | shuffle=shuffle,complevel=complevel,quantize_mode=quantize_mode) 28 | foo[:] = data 29 | file.close() 30 | file = Dataset(filename) 31 | data = file.variables['data'][:] 32 | file.close() 33 | 34 | 35 | @unittest.skipIf(not __has_quantization_support__, "missing quantisation support") 36 | class 
CompressionTestCase(unittest.TestCase): 37 | def setUp(self): 38 | self.files = files 39 | # no compression 40 | write_netcdf(self.files[0],False,None,data_array) 41 | # compressed, lossless, no shuffle. 42 | write_netcdf(self.files[1],True,None,data_array) 43 | # compressed, lossless, with shuffle. 44 | write_netcdf(self.files[2],True,None,data_array,shuffle=True) 45 | # compressed, lossy, no shuffle. 46 | write_netcdf(self.files[3],True,nsd,data_array) 47 | # compressed, lossy, with shuffle. 48 | write_netcdf(self.files[4],True,nsd,data_array,shuffle=True) 49 | # compressed, lossy, with shuffle, and alternate quantization. 50 | write_netcdf(self.files[5],True,nsd,data_array,quantize_mode='GranularBitRound',shuffle=True) 51 | # compressed, lossy, with shuffle, and alternate quantization. 52 | write_netcdf(self.files[6],True,nsb,data_array,quantize_mode='BitRound',shuffle=True) 53 | 54 | def tearDown(self): 55 | # Remove the temporary files 56 | for file in self.files: 57 | os.remove(file) 58 | 59 | def runTest(self): 60 | """testing zlib and shuffle compression filters""" 61 | uncompressed_size = os.stat(self.files[0]).st_size 62 | #print('uncompressed size = ',uncompressed_size) 63 | # check compressed data. 64 | f = Dataset(self.files[1]) 65 | size = os.stat(self.files[1]).st_size 66 | #print('compressed lossless no shuffle = ',size) 67 | assert_almost_equal(data_array,f.variables['data'][:]) 68 | assert f.variables['data'].filters() ==\ 69 | {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':complevel,'fletcher32':False} 70 | assert size < 0.95*uncompressed_size 71 | f.close() 72 | # check compression with shuffle 73 | f = Dataset(self.files[2]) 74 | size = os.stat(self.files[2]).st_size 75 | #print('compressed lossless with shuffle ',size) 76 | assert_almost_equal(data_array,f.variables['data'][:]) 77 | assert f.variables['data'].filters() ==\ 78 | {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':complevel,'fletcher32':False} 79 | assert size < 0.85*uncompressed_size 80 | f.close() 81 | # check lossy compression without shuffle 82 | f = Dataset(self.files[3]) 83 | size = os.stat(self.files[3]).st_size 84 | errmax = (np.abs(data_array-f.variables['data'][:])).max() 85 | #print('compressed lossy no shuffle = ',size,' max err = ',errmax) 86 | assert f.variables['data'].quantization() == (nsd,'BitGroom') 87 | assert errmax < 1.e-3 88 | assert size < 0.35*uncompressed_size 89 | f.close() 90 | # check lossy compression with shuffle 91 | f = Dataset(self.files[4]) 92 | size = os.stat(self.files[4]).st_size 93 | errmax = (np.abs(data_array-f.variables['data'][:])).max() 94 | print('compressed lossy with shuffle and standard quantization = ',size,' max err = ',errmax) 95 | assert f.variables['data'].quantization() == (nsd,'BitGroom') 96 | assert errmax < 1.e-3 97 | assert size < 0.24*uncompressed_size 98 | f.close() 99 | # check lossy compression with shuffle and alternate quantization 100 | f = Dataset(self.files[5]) 101 | size = os.stat(self.files[5]).st_size 102 | errmax = (np.abs(data_array-f.variables['data'][:])).max() 103 | print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax) 104 | assert f.variables['data'].quantization() == (nsd,'GranularBitRound') 105 | assert errmax < 1.e-3 106 | assert size < 0.24*uncompressed_size 107 | f.close() 108 | # check lossy compression with shuffle and alternate quantization 109 | f = Dataset(self.files[6]) 110 | size = 
os.stat(self.files[6]).st_size 111 | errmax = (np.abs(data_array-f.variables['data'][:])).max() 112 | print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax) 113 | assert f.variables['data'].quantization() == (nsb,'BitRound') 114 | assert errmax < 1.e-3 115 | assert size < 0.24*uncompressed_size 116 | f.close() 117 | 118 | if __name__ == '__main__': 119 | unittest.main() 120 | -------------------------------------------------------------------------------- /test/test_alignment.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from netCDF4 import set_alignment, get_alignment, Dataset 3 | from netCDF4 import __has_set_alignment__ 4 | import netCDF4 5 | import os 6 | import subprocess 7 | import tempfile 8 | import unittest 9 | 10 | # During testing, sometimes development versions are used. 11 | # They may be written as 4.9.1-development 12 | libversion_no_development = netCDF4.__netcdf4libversion__.split('-')[0] 13 | libversion = tuple(int(v) for v in libversion_no_development.split('.')) 14 | has_alignment = (libversion[0] > 4) or ( 15 | libversion[0] == 4 and (libversion[1] >= 9) 16 | ) 17 | try: 18 | has_h5ls = subprocess.check_call(['h5ls', '--version'], stdout=subprocess.PIPE) == 0 19 | except Exception: 20 | has_h5ls = False 21 | 22 | file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 23 | 24 | 25 | class AlignmentTestCase(unittest.TestCase): 26 | def setUp(self): 27 | 28 | self.file = file_name 29 | 30 | # This is a global variable in netcdf4, it must be set before File 31 | # creation 32 | if has_alignment: 33 | set_alignment(1024, 4096) 34 | assert get_alignment() == (1024, 4096) 35 | 36 | f = Dataset(self.file, 'w') 37 | f.createDimension('x', 4096) 38 | # Create many datasets so that we decrease the chance of 39 | # the dataset being randomly aligned 40 | for i in range(10): 41 | f.createVariable(f'data{i:02d}', np.float64, ('x',)) 42 | v = f.variables[f'data{i:02d}'] 43 | v[...] = 0 44 | f.close() 45 | if has_alignment: 46 | # ensure to reset the alignment to 1 (default values) so as not to 47 | # disrupt other tests 48 | set_alignment(1, 1) 49 | assert get_alignment() == (1, 1) 50 | 51 | def test_version_settings(self): 52 | if has_alignment: 53 | # One should always be able to set the alignment to 1, 1 54 | set_alignment(1, 1) 55 | assert get_alignment() == (1, 1) 56 | else: 57 | with self.assertRaises(RuntimeError): 58 | set_alignment(1, 1) 59 | with self.assertRaises(RuntimeError): 60 | get_alignment() 61 | 62 | def test_reports_alignment_capabilities(self): 63 | # Assert that the library reports that it supports alignment correctly 64 | assert has_alignment == __has_set_alignment__ 65 | 66 | # if we have no support for alignment, we have no guarantees on 67 | # how the data can be aligned 68 | @unittest.skipIf( 69 | not has_h5ls, 70 | "h5ls not found." 71 | ) 72 | @unittest.skipIf( 73 | not has_alignment, 74 | "No support for set_alignment in libnetcdf." 75 | ) 76 | def test_setting_alignment(self): 77 | # We choose to use h5ls instead of h5py since h5ls is very likely 78 | # to be installed alongside the rest of the tooling required to build 79 | # netcdf4-python 80 | # Output from h5ls is expected to look like: 81 | """ 82 | Opened "/tmp/tmpqexgozg1.nc" with sec2 driver. 
83 | data00 Dataset {4096/4096} 84 | Attribute: DIMENSION_LIST {1} 85 | Type: variable length of 86 | object reference 87 | Attribute: _Netcdf4Coordinates {1} 88 | Type: 32-bit little-endian integer 89 | Location: 1:563 90 | Links: 1 91 | Storage: 32768 logical bytes, 32768 allocated bytes, 100.00% utilization 92 | Type: IEEE 64-bit little-endian float 93 | Address: 8192 94 | data01 Dataset {4096/4096} 95 | Attribute: DIMENSION_LIST {1} 96 | Type: variable length of 97 | object reference 98 | Attribute: _Netcdf4Coordinates {1} 99 | Type: 32-bit little-endian integer 100 | Location: 1:1087 101 | Links: 1 102 | Storage: 32768 logical bytes, 32768 allocated bytes, 100.00% utilization 103 | Type: IEEE 64-bit little-endian float 104 | Address: 40960 105 | [...] 106 | x Dataset {4096/4096} 107 | Attribute: CLASS scalar 108 | Type: 16-byte null-terminated ASCII string 109 | Attribute: NAME scalar 110 | Type: 64-byte null-terminated ASCII string 111 | Attribute: REFERENCE_LIST {10} 112 | Type: struct { 113 | "dataset" +0 object reference 114 | "dimension" +8 32-bit little-endian unsigned integer 115 | } 16 bytes 116 | Attribute: _Netcdf4Dimid scalar 117 | Type: 32-bit little-endian integer 118 | Location: 1:239 119 | Links: 1 120 | Storage: 16384 logical bytes, 0 allocated bytes 121 | Type: IEEE 32-bit big-endian float 122 | Address: 18446744073709551615 123 | """ 124 | h5ls_results = subprocess.check_output( 125 | ["h5ls", "--verbose", "--address", "--simple", self.file] 126 | ).decode() 127 | 128 | addresses = { 129 | f'data{i:02d}': -1 130 | for i in range(10) 131 | } 132 | 133 | data_variable = None 134 | for line in h5ls_results.split('\n'): 135 | if not line.startswith(' '): 136 | data_variable = line.split(' ')[0] 137 | # only process the data variables we care to inspect 138 | if data_variable not in addresses: 139 | continue 140 | line = line.strip() 141 | if line.startswith('Address:'): 142 | address = int(line.split(':')[1].strip()) 143 | addresses[data_variable] = address 144 | 145 | for key, address in addresses.items(): 146 | is_aligned = (address % 4096) == 0 147 | assert is_aligned, f"{key} is not aligned. Address = 0x{address:x}" 148 | 149 | # Alternative implementation in h5py 150 | # import h5py 151 | # with h5py.File(self.file, 'r') as h5file: 152 | # for i in range(10): 153 | # v = h5file[f'data{i:02d}'] 154 | # assert (dataset.id.get_offset() % 4096) == 0 155 | 156 | def tearDown(self): 157 | # Remove the temporary files 158 | os.remove(self.file) 159 | 160 | 161 | if __name__ == '__main__': 162 | unittest.main() 163 | -------------------------------------------------------------------------------- /test/test_masked3.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import tempfile 4 | 5 | import numpy as np 6 | from numpy import ma 7 | from numpy.testing import assert_array_almost_equal 8 | from netCDF4 import Dataset, default_fillvals 9 | 10 | # Test automatic conversion of masked arrays (set_auto_mask()) 11 | 12 | class SetAutoMaskTestBase(unittest.TestCase): 13 | 14 | """Base object for tests checking the functionality of set_auto_mask()""" 15 | 16 | def setUp(self): 17 | 18 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 19 | 20 | self.fillval = default_fillvals["i2"] 21 | self.v = np.array([self.fillval, 5, 4, -9999], dtype = "i2") 22 | self.v_ma = ma.array([self.fillval, 5, 4, -9999], dtype = "i2", mask = [True, False, False, True]) 23 | 24 | self.scale_factor = 10. 
25 | self.add_offset = 5. 26 | 27 | self.v_scaled = self.v * self.scale_factor + self.add_offset 28 | self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset 29 | 30 | f = Dataset(self.testfile, 'w') 31 | _ = f.createDimension('x', None) 32 | v = f.createVariable('v', "i2", 'x') 33 | 34 | v.missing_value = np.array(-9999, v.dtype) 35 | 36 | # v[0] not set, will be equal to _FillValue 37 | v[1] = self.v[1] 38 | v[2] = self.v[2] 39 | v[3] = v.missing_value 40 | 41 | f.close() 42 | 43 | 44 | def tearDown(self): 45 | 46 | os.remove(self.testfile) 47 | 48 | 49 | class SetAutoMaskFalse(SetAutoMaskTestBase): 50 | 51 | def test_unscaled(self): 52 | 53 | """Testing auto-conversion of masked arrays for set_auto_mask(False)""" 54 | 55 | f = Dataset(self.testfile, "r") 56 | 57 | f.variables["v"].set_auto_mask(False) 58 | v = f.variables["v"][:] 59 | 60 | self.assertEqual(v.dtype, "i2") 61 | self.assertTrue(isinstance(v, np.ndarray)) 62 | self.assertTrue(not isinstance(v, ma.masked_array)) 63 | assert_array_almost_equal(v, self.v) 64 | 65 | f.close() 66 | 67 | 68 | def test_scaled(self): 69 | 70 | """Testing auto-conversion of masked arrays for set_auto_mask(False) with scaling""" 71 | 72 | # Update test data file 73 | 74 | f = Dataset(self.testfile, "a") 75 | f.variables["v"].scale_factor = self.scale_factor 76 | f.variables["v"].add_offset = self.add_offset 77 | f.close() 78 | 79 | # Note: Scaling variables is default if scale_factor and/or add_offset are present 80 | 81 | f = Dataset(self.testfile, "r") 82 | 83 | f.variables["v"].set_auto_mask(False) 84 | v = f.variables["v"][:] 85 | 86 | self.assertEqual(v.dtype, "f8") 87 | self.assertTrue(isinstance(v, np.ndarray)) 88 | self.assertTrue(not isinstance(v, ma.masked_array)) 89 | assert_array_almost_equal(v, self.v_scaled) 90 | 91 | f.close() 92 | 93 | 94 | class SetAutoMaskTrue(SetAutoMaskTestBase): 95 | 96 | def test_unscaled(self): 97 | 98 | """Testing auto-conversion of masked arrays for set_auto_mask(True)""" 99 | 100 | f = Dataset(self.testfile) 101 | 102 | f.variables["v"].set_auto_mask(True) # The default anyway... 103 | v_ma = f.variables['v'][:] 104 | 105 | self.assertEqual(v_ma.dtype, "i2") 106 | self.assertTrue(isinstance(v_ma, np.ndarray)) 107 | self.assertTrue(isinstance(v_ma, ma.masked_array)) 108 | assert_array_almost_equal(v_ma, self.v_ma) 109 | f.close() 110 | 111 | def test_scaled(self): 112 | 113 | """Testing auto-conversion of masked arrays for set_auto_mask(True)""" 114 | 115 | # Update test data file 116 | 117 | f = Dataset(self.testfile, "a") 118 | f.variables["v"].scale_factor = self.scale_factor 119 | f.variables["v"].add_offset = self.add_offset 120 | f.close() 121 | 122 | # Note: Scaling variables is default if scale_factor and/or add_offset are present 123 | 124 | f = Dataset(self.testfile) 125 | 126 | f.variables["v"].set_auto_mask(True) # The default anyway... 
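        # With auto-masking and auto-scaling both active, the read below is
        # expected to yield a float64 masked array: each stored 'i2' value is
        # unpacked as stored*scale_factor + add_offset, and elements equal to
        # missing_value (or the default 'i2' _FillValue) come back masked.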
127 | v_ma = f.variables['v'][:] 128 | 129 | self.assertEqual(v_ma.dtype, "f8") 130 | self.assertTrue(isinstance(v_ma, np.ndarray)) 131 | self.assertTrue(isinstance(v_ma, ma.masked_array)) 132 | assert_array_almost_equal(v_ma, self.v_ma_scaled) 133 | f.close() 134 | 135 | 136 | class GlobalSetAutoMaskTest(unittest.TestCase): 137 | 138 | def setUp(self): 139 | 140 | self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 141 | 142 | f = Dataset(self.testfile, 'w') 143 | 144 | grp1 = f.createGroup('Group1') 145 | grp2 = f.createGroup('Group2') 146 | f.createGroup('Group3') # empty group 147 | 148 | f.createVariable('var0', "i2", ()) 149 | grp1.createVariable('var1', 'f8', ()) 150 | grp2.createVariable('var2', 'f4', ()) 151 | 152 | f.close() 153 | 154 | def tearDown(self): 155 | 156 | os.remove(self.testfile) 157 | 158 | def runTest(self): 159 | 160 | # Note: The default behaviour is to to have both auto-masking and auto-scaling activated. 161 | # This is already tested in tst_scaled.py, so no need to repeat here. Instead, 162 | # disable auto-masking and auto-scaling altogether. 163 | 164 | f = Dataset(self.testfile, "r") 165 | 166 | # Neither scaling and masking enabled 167 | 168 | f.set_auto_maskandscale(False) 169 | 170 | v0 = f.variables['var0'] 171 | v1 = f.groups['Group1'].variables['var1'] 172 | v2 = f.groups['Group2'].variables['var2'] 173 | 174 | self.assertFalse(v0.scale) 175 | self.assertFalse(v0.mask) 176 | 177 | self.assertFalse(v1.scale) 178 | self.assertFalse(v1.mask) 179 | 180 | self.assertFalse(v2.scale) 181 | self.assertFalse(v2.mask) 182 | 183 | # No auto-masking, but auto-scaling 184 | 185 | f.set_auto_maskandscale(True) 186 | f.set_auto_mask(False) 187 | 188 | self.assertTrue(v0.scale) 189 | self.assertFalse(v0.mask) 190 | 191 | self.assertTrue(v1.scale) 192 | self.assertFalse(v1.mask) 193 | 194 | self.assertTrue(v2.scale) 195 | self.assertFalse(v2.mask) 196 | 197 | f.close() 198 | 199 | 200 | if __name__ == '__main__': 201 | unittest.main() 202 | -------------------------------------------------------------------------------- /test/test_compoundvar.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import tempfile 5 | from netCDF4 import Dataset, CompoundType 6 | import numpy as np 7 | from numpy.testing import assert_array_equal, assert_array_almost_equal 8 | 9 | # test compound data types. 
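# A minimal sketch of the workflow exercised below (the file name and field
# names are illustrative only, not used by this test): a numpy structured
# dtype is registered with createCompoundType(), and the returned type object
# is then passed as the datatype argument of createVariable():
#
#   import numpy as np
#   from netCDF4 import Dataset
#   wind_dt = np.dtype([('speed', 'f4'), ('direction', 'f4')])
#   with Dataset('compound_sketch.nc', 'w') as nc:
#       wind_t = nc.createCompoundType(wind_dt, 'wind_t')
#       nc.createDimension('station', 2)
#       wind = nc.createVariable('wind', wind_t, ('station',))
#       wind[:] = np.array([(1.5, 270.0), (3.0, 90.0)], dtype=wind_dt)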
10 | 11 | FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name 12 | DIM_NAME = 'phony_dim' 13 | GROUP_NAME = 'phony_group' 14 | VAR_NAME = 'phony_compound_var' 15 | TYPE_NAME1 = 'cmp1' 16 | TYPE_NAME2 = 'cmp2' 17 | TYPE_NAME3 = 'cmp3' 18 | TYPE_NAME4 = 'cmp4' 19 | TYPE_NAME5 = 'cmp5' 20 | DIM_SIZE=3 21 | # unaligned data types (note they are nested) 22 | dtype1=np.dtype([('i', 'i2'), ('j', 'i8')]) 23 | dtype2=np.dtype([('x', 'f4',), ('y', 'f8',(3,2))]) 24 | dtype3=np.dtype([('xx', dtype1), ('yy', dtype2)]) 25 | dtype4=np.dtype([('xxx',dtype3),('yyy','f8', (4,))]) 26 | dtype5=np.dtype([('x1', dtype1), ('y1', dtype2)]) 27 | # aligned data types 28 | dtype1a = np.dtype({'names':['i','j'],'formats':['f4') 19 | warnings.simplefilter('ignore') # ignore UserWarnings generated below 20 | ll = dataset.createVariable('little-little', 'f4', dims) 23 | bb = dataset.createVariable('big-big', '>f4', dims) 24 | ll[:] = little 25 | lb[:] = big 26 | bl[:] = little 27 | bb[:] = big 28 | dataset.close() 29 | 30 | def check_byteswap(file, data): 31 | # byteswapping is done internally to native endian format 32 | # when numpy array has non-native byte order. The byteswap was 33 | # initially done in place, which caused the numpy array to 34 | # be modified in the calling program. Pull request #555 35 | # changed the byteswap to a copy, and this test checks 36 | # to make sure the input numpy array is not modified. 37 | dataset = netCDF4.Dataset(file,'w') 38 | dataset.createDimension('time', None) 39 | dataset.createDimension('space', 4) 40 | dims = ('time', 'space') 41 | bl = dataset.createVariable('big-little', np.float32, dims, endian='big') 42 | data2 = data.copy() 43 | bl[:] = data 44 | dataset.close() 45 | f = netCDF4.Dataset(file) 46 | bl = f.variables['big-little'][:] 47 | # check data. 48 | assert_array_almost_equal(data, data2) 49 | assert_array_almost_equal(bl, data) 50 | f.close() 51 | 52 | 53 | def check_data(file, data): 54 | f = netCDF4.Dataset(file) 55 | ll = f.variables['little-little'][:] 56 | lb = f.variables['little-big'][:] 57 | bb = f.variables['big-big'][:] 58 | bl = f.variables['big-little'][:] 59 | # check data. 60 | assert_array_almost_equal(ll, data) 61 | assert_array_almost_equal(lb, data) 62 | assert_array_almost_equal(bl, data) 63 | assert_array_almost_equal(bb, data) 64 | f.close() 65 | 66 | def issue310(file): 67 | mval = 999.; fval = -999 68 | nc = netCDF4.Dataset(file, "w") 69 | nc.createDimension('obs', 10) 70 | if netCDF4.is_native_little: 71 | endian='big' 72 | elif netCDF4.is_native_big: 73 | endian='little' 74 | else: 75 | raise ValueError('cannot determine native endianness') 76 | var_big_endian = nc.createVariable( 77 | 'obs_big_endian', '>f8', ('obs', ), endian=endian, fill_value=fval, # type: ignore # mypy is bad at narrowing endian 78 | ) 79 | # use default _FillValue 80 | var_big_endian2 = nc.createVariable( 81 | 'obs_big_endian2', '>f8', ('obs', ), endian=endian, # type: ignore # mypy is bad at narrowing endian 82 | ) 83 | # NOTE: missing_value be written in same byte order 84 | # as variable, or masked array won't be masked correctly 85 | # when data is read in. 86 | var_big_endian.missing_value = mval 87 | var_big_endian[0]=np.pi 88 | var_big_endian[1]=mval 89 | var_big_endian2.missing_value = mval 90 | var_big_endian2[0]=np.pi 91 | var_big_endian2[1]=mval 92 | var_native_endian = nc.createVariable(\ 93 | 'obs_native_endian', '