diff --git a/.travis.yml b/.travis.yml index e018f67c..966c9d11 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,6 +14,7 @@ addons: before_install: - pip install pytest-cov + - pip install pytest-flake8 - pip install coveralls - pip install future - pip install numpy @@ -41,7 +42,7 @@ install: - python setup.py install script: - - pytest -vs --cov=pysatModels/ + - pytest -vs --cov=pysatModels/ --flake8 after_success: - coveralls diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f6667895..df889ce5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,18 +2,20 @@ Contributing ============ Bug reports, feature suggestions and other contributions are greatly -appreciated! pysatModels is a community-driven project and welcomes both feedback and contributions. +appreciated! pysatModels is a community-driven project and welcomes both +feedback and contributions. Short version ============= -* Submit bug reports and feature requests at `GitHub `_ +* Submit bug reports and feature requests at + [GitHub Issues](https://github.com/pysat/pysatModels/issues) * Make pull requests to the ``develop`` branch Bug reports =========== -When `reporting a bug `_ please +When [reporting a bug](https://github.com/pysat/pysatModels/issues) please include: * Your operating system name and version @@ -24,7 +26,7 @@ Feature requests and feedback ============================= The best way to send feedback is to file an issue at -`GitHub `_. +[GitHub Issues](https://github.com/pysat/pysatModels/issues). If you are proposing a feature: @@ -38,40 +40,48 @@ Development To set up `pysatModels` for local development: -1. `Fork pysat on GitHub `_. +1. Fork pysat on [GitHub](https://github.com/pysat/pysatModels/fork). 2. Clone your fork locally:: - git clone git@github.com:your_name_here/pysatModels.git + git clone git@github.com:your_name_here/pysatModels.git 3. Create a branch for local development:: - git checkout -b name-of-your-bugfix-or-feature + git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. Tests for new instruments are performed automatically. Tests for custom functions should be added to the - appropriately named file in ``pysatModels/tests``. For example, the averaging routines in avg.py are tested in ``pysatModels/tests/test_avg.py``. If no - test file exists, then you should create one. This testing uses pytest, which - will run tests on any python file in the test directory that starts with - ``test_``. + appropriately named file in ``pysatModels/tests``. For example, the + averaging routines in avg.py are tested in + ``pysatModels/tests/test_avg.py``. If no test file exists, then you should + create one. This testing uses pytest, which will run tests on any python + file in the test directory that starts with ``test_``. -4. When you're done making changes, run all the checks to ensure that nothing - is broken on your local system:: +4. When you're done making changes, run all the checks from the + ``pysatModels/tests`` directory to ensure that nothing is broken on your + local system. You may need to install + [pytest](https://docs.pytest.org/en/latest/) and + [pytest-flake8](https://pypi.org/project/pytest-flake8/) first. :: - pytest -vs + python -m pytest -vs --flake8 -5. Update/add documentation (in ``docs``), if relevant +5. Update or add documentation (in ``docs``), if relevant. If you have added + a new routine, you will need to add an example in the ``docs/examples`` + folder. -5. Commit your changes and push your branch to GitHub:: +6. 
Commit your changes and push your branch to GitHub. Our commit statements + follow the basic rules in the + [Numpy/SciPy workflow](https://docs.scipy.org/doc/numpy-1.15.1/dev/gitwash/development_workflow.html):: - git add . - git commit -m "Brief description of your changes" - git push origin name-of-your-bugfix-or-feature + git add . + git commit -m "TYPE: Brief description of your changes" + git push origin name-of-your-bugfix-or-feature -6. Submit a pull request through the GitHub website. Pull requests should be +7. Submit a pull request through the GitHub website. Pull requests should be made to the ``develop`` branch. Pull Request Guidelines @@ -85,10 +95,10 @@ For merging, you should: 1. Include an example for use 2. Add a note to ``CHANGELOG.md`` about the changes 3. Ensure that all checks passed (current checks include Scrutinizer, Travis-CI, - and Coveralls) [1]_ + and Coveralls). -.. [1] If you don't have all the necessary Python versions available locally or - have trouble building all the testing environments, you can rely on - Travis to run the tests for each change you add in the pull request. - Because testing here will delay tests by other developers, please ensure - that the code passes all tests on your local system first. + If you don't have all the necessary Python versions available locally or + have trouble building all the testing environments, you can rely on + Travis to run the tests for each change you add in the pull request. + Because testing here will delay tests by other developers, please ensure + that the code passes all tests on your local system first. diff --git a/pysatModels/__init__.py b/pysatModels/__init__.py index db3a8734..bcfd1d33 100644 --- a/pysatModels/__init__.py +++ b/pysatModels/__init__.py @@ -5,7 +5,7 @@ # ----------------------------------------------------------------------------- """ pysatModels -=============== +=========== Model utilities designed to facilitate studies that integrate observational and modelled data sets. 
@@ -18,8 +18,9 @@ import logging import os -from pysatModels import (utils) -from pysatModels import (models) +# Import key modules and skip F401 testing in flake8 +from pysatModels import (utils) # noqa: F401 +from pysatModels import (models) # noqa: F401 # set the version here = os.path.abspath(os.path.dirname(__file__)) diff --git a/pysatModels/models/__init__.py b/pysatModels/models/__init__.py index 321285a0..af3fa1d4 100644 --- a/pysatModels/models/__init__.py +++ b/pysatModels/models/__init__.py @@ -14,4 +14,5 @@ from __future__ import absolute_import from __future__ import unicode_literals -from pysatModels.models import ucar_tiegcm +# Import key modules and skip F401 testing in flake8 +from pysatModels.models import ucar_tiegcm # noqa: F401 diff --git a/pysatModels/tests/test_utils_extract.py b/pysatModels/tests/test_utils_extract.py index 8b905076..08894bd1 100644 --- a/pysatModels/tests/test_utils_extract.py +++ b/pysatModels/tests/test_utils_extract.py @@ -87,7 +87,8 @@ def test_bad_arg_input(self, bad_index, bad_input, err_msg): "interpn only understands the methods"), ("model_label", 1, "Unknown format code "), ("time_method", "fun", "unknown time method"), - ("pair_method", "fun", "unknown pairing method")]) + ("pair_method", "fun", + "unknown pairing method")]) def test_bad_kwarg_input(self, bad_key, bad_val, err_msg): """ Test for expected failure with bad kwarg input """ kwargs = {bad_key: bad_val} diff --git a/pysatModels/tests/test_utils_match.py b/pysatModels/tests/test_utils_match.py index a27e7ce3..75950e1a 100644 --- a/pysatModels/tests/test_utils_match.py +++ b/pysatModels/tests/test_utils_match.py @@ -8,7 +8,6 @@ import pysat -import pysatModels as ps_mod import pysatModels.utils.match as match @@ -77,12 +76,16 @@ def teardown(self): del self.input_args, self.required_kwargs, self.inst, self.model @pytest.mark.parametrize("del_key,err_msg", - [("inst_lon_name", "Need longitude name for inst"), - ("mod_lon_name", "Need longitude name for model"), - ("inst_name", "Must provide instrument location"), + [("inst_lon_name", + "Need longitude name for inst"), + ("mod_lon_name", + "Need longitude name for model"), + ("inst_name", + "Must provide instrument location"), ("mod_name", "Must provide the same number"), ("mod_units", "Must provide units for each "), - ("mod_datetime_name", "Need datetime coordinate"), + ("mod_datetime_name", + "Need datetime coordinate"), ("mod_time_name", "Need time coordinate"), ("inst_clean_rout", "Need routine to clean")]) def test_input_failure(self, del_key, err_msg): diff --git a/pysatModels/utils/__init__.py b/pysatModels/utils/__init__.py index c93c3432..a522cffc 100644 --- a/pysatModels/utils/__init__.py +++ b/pysatModels/utils/__init__.py @@ -5,14 +5,15 @@ # ----------------------------------------------------------------------------- """ pysatModels.utils -===================== +================= Utilities designed to extract, match, and compare modelled and observed data """ -from __future__ import absolute_import, unicode_literals +from __future__ import absolute_import, unicode_literals -from pysatModels.utils import extract -from pysatModels.utils import match -from pysatModels.utils import compare +# Import key modules and skip F401 testing in flake8 +from pysatModels.utils import extract # noqa: F401 +from pysatModels.utils import match # noqa: F401 +from pysatModels.utils import compare # noqa: F401 diff --git a/pysatModels/utils/compare.py b/pysatModels/utils/compare.py index d623150f..a4e413a7 100644 --- 
a/pysatModels/utils/compare.py +++ b/pysatModels/utils/compare.py @@ -116,8 +116,8 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[], raise ValueError('must provide Dataset of paired observations') if len(inst_name) != len(mod_name): - raise ValueError('must provide equal number of instrument and model ' + - 'data names for comparison') + raise ValueError(''.join(['must provide equal number of instrument ', + 'and model data names for comparison'])) if not np.all([iname in pairs.data_vars.keys() for iname in inst_name]): raise ValueError('unknown instrument data value supplied') @@ -130,9 +130,9 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[], known_methods.extend(list(grouped_methods.keys())) unknown_methods = [mm for mm in methods if mm not in list(method_rout.keys())] - raise ValueError('unknown statistical method(s) requested:\n' + - '{:}\nuse only:\n{:}'.format(unknown_methods, - known_methods)) + raise ValueError(''.join(['unknown statistical method(s) requested:\n', + '{:}\nuse only:\n'.format(unknown_methods), + '{:}'.format(unknown_methods)])) # Initialize the output stat_dict = {iname: dict() for iname in inst_name} @@ -170,6 +170,6 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[], # instead of stopping processing. Only valid statistics will # be included in output ps_mod.logger.info("{:s} can't use {:s}: {:}".format(iname, - mm, err)) + mm, err)) return stat_dict, data_units diff --git a/pysatModels/utils/extract.py b/pysatModels/utils/extract.py index 39418328..f6bc3d41 100644 --- a/pysatModels/utils/extract.py +++ b/pysatModels/utils/extract.py @@ -99,8 +99,8 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name, raise ValueError(estr) if len(mod_name) != len(mod_units): - raise ValueError('Must provide units for each model location ' + - 'attribute') + raise ValueError(''.join(['Must provide units for each model location', + ' attribute'])) if mod_time_name not in model.coords: raise ValueError("Unknown model time coordinate key name") @@ -122,21 +122,22 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name, inst.units_label]) # create initial fake regular grid index in inst - inst_model_coord = inst[inst_name[0]]*0 + inst_model_coord = inst[inst_name[0]] * 0 # we need to create altitude index from model # collect relevant inputs # First, model locations for interpolation # we use the dimensions associated with model altitude # in the order provided - points = [model[dim].values/temp_scale for dim, temp_scale in zip(mod_name, - inst_scale)] + points = [model[dim].values / temp_scale + for dim, temp_scale in zip(mod_name, inst_scale)] # time first points.insert(0, model[mod_datetime_name].values.astype(int)) # create interpolator interp = interpolate.RegularGridInterpolator(points, - np.log(model[mod_alt].values/alt_scale), + np.log(model[mod_alt].values + / alt_scale), bounds_error=False, fill_value=None) # use this interpolator to figure out what altitudes we are at @@ -149,7 +150,7 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name, # log of instrument altitude log_ialt = np.log(inst[inst_alt]) # initial difference signal - diff = log_ialt*0 + 2.*tol + diff = log_ialt * 0 + 2.0 * tol while np.any(np.abs(diff) > tol): # create input array using satellite time/position # replace the altitude coord with the fake tiegcm one @@ -160,7 +161,7 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name, 
coords.append(inst_model_coord) else: # scale other dimensions to the model - coords.append(inst[coord]*iscale) + coords.append(inst[coord] * iscale) coords.insert(0, inst.index.values.astype(int)) # to peform the interpolation we need points @@ -179,7 +180,7 @@ def instrument_altitude_to_model_pressure(inst, model, inst_name, mod_name, # shift index in inst for model pressure level # in the opposite direction to diff # reduced value by scale, the 'scale height' - inst_model_coord -= diff/scale + inst_model_coord -= diff / scale # achieved model altitude inst[inst_out_alt] = np.e**orbit_alt @@ -287,8 +288,8 @@ def instrument_view_through_model(inst, model, inst_name, mod_name, raise ValueError(estr) if len(mod_name) != len(mod_units): - raise ValueError('Must provide units for each model location ' + - 'attribute') + raise ValueError(''.join(['Must provide units for each model location', + ' attribute'])) if mod_time_name not in model.coords: raise ValueError("Unknown model time coordinate key name") @@ -324,14 +325,12 @@ def instrument_view_through_model(inst, model, inst_name, mod_name, points.append(model[mod_datetime_name].values.astype(int)) # now spatial for iscale, var in zip(inst_scale, mod_name): - points.append(model[var].values/iscale) + points.append(model[var].values / iscale) # create the interpolator - interp[label] = interpolate.RegularGridInterpolator(points, - model[label].values, - bounds_error=False, - fill_value=None, - method=method) + interp[label] = interpolate.RegularGridInterpolator( + points, model[label].values, bounds_error=False, fill_value=None, + method=method) # apply it at observed locations and store result output_names.append('_'.join((model_label, label))) inst[output_names[-1]] = interp[label](inst_pts) @@ -416,8 +415,8 @@ def instrument_view_irregular_model(inst, model, inst_name, mod_name, raise ValueError(estr) if len(mod_name) != len(mod_units): - raise ValueError('Must provide units for each model location ' + - 'attribute') + raise ValueError(''.join(['Must provide units for each model location', + ' attribute'])) # ensure coordinate dimensions match for var in sel_name: @@ -447,8 +446,8 @@ def instrument_view_irregular_model(inst, model, inst_name, mod_name, inst.meta[iname, inst.units_label]) # First, model locations for interpolation (regulargrid) - coords = [model[dim].values/temp_scale for dim, temp_scale in zip(mod_name, - inst_scale)] + coords = [model[dim].values / temp_scale + for dim, temp_scale in zip(mod_name, inst_scale)] # time first coords.insert(0, model[mod_datetime_name].values.astype(int)) @@ -489,11 +488,11 @@ def instrument_view_irregular_model(inst, model, inst_name, mod_name, else: max_sel_val = max_pts_alt # perform downselection - idx, = np.where((points[:, update_dim] >= min_sel_val) & - (points[:, update_dim] <= max_sel_val)) + idx, = np.where((points[:, update_dim] >= min_sel_val) + & (points[:, update_dim] <= max_sel_val)) points = points[idx, :] - ps_mod.logger.debug('Remaining points after downselection ' - + str(len(idx))) + ps_mod.logger.debug('Remaining points after downselection {:d}'.format( + len(idx))) # create input array using inst time/position coords = [inst[coord] for coord in inst_name] @@ -599,8 +598,8 @@ def extract_modelled_observations(inst, model, inst_name, mod_name, raise ValueError(estr) if len(mod_name) != len(mod_units): - raise ValueError('Must provide units for each model location ' + - 'attribute') + raise ValueError(''.join(['Must provide units for each model location', + ' attribute'])) 
if mod_time_name not in model.coords: raise ValueError("Unknown model time coordinate key name") @@ -613,7 +612,7 @@ def extract_modelled_observations(inst, model, inst_name, mod_name, # Ensure mod_name is a list mod_name = list(mod_name) - + # Remove any model coordinates from the modelled data to interpolate sel_name = sel_name[[mdat not in mod_name for mdat in sel_name]] @@ -663,14 +662,15 @@ def extract_modelled_observations(inst, model, inst_name, mod_name, # resolution of a model run mind = list() iind = list() - del_sec = abs(mod_datetime-inst.index[:, np.newaxis]).astype(float) * 1.0e-9 + del_sec = abs(mod_datetime + - inst.index[:, np.newaxis]).astype(float) * 1.0e-9 for inst_ind, mod_ind in enumerate(del_sec.argmin(axis=1)): if del_sec[inst_ind, mod_ind] <= min_del: if mod_ind in mind and pair_method == 'closest': # Test to see if this model observation has multiple pairings old_ind = mind.index(mod_ind) - if(del_sec[inst_ind, mod_ind] < - del_sec[iind[old_ind], mind[old_ind]]): + if(del_sec[inst_ind, mod_ind] + < del_sec[iind[old_ind], mind[old_ind]]): # If this one is closer, keep it iind[old_ind] = inst_ind mind[old_ind] = mod_ind @@ -709,7 +709,6 @@ def extract_modelled_observations(inst, model, inst_name, mod_name, # Determine the dimension values dims = list(model.data_vars[mdat].dims) - ndim = model.data_vars[mdat].data.shape indices = {mod_time_name: mind[i]} # Construct the data needed for interpolation, ensuring that @@ -753,7 +752,7 @@ def extract_modelled_observations(inst, model, inst_name, mod_name, if icycles < ncycles or icycles == 0: ss = [ii if k == 0 else 0 for k in range(idims)] se = [ii + 1 if k == 0 else - len(inst.data.coords[idim_names[k-1]]) + len(inst.data.coords[idim_names[k - 1]]) for k in range(idims)] xout = [cinds[ind_dims.index(k)] if k in ind_dims else slice(ss[k], se[k]) for k in range(idims)] @@ -779,11 +778,11 @@ def extract_modelled_observations(inst, model, inst_name, mod_name, k = 0 cinds[k] += 1 - while cinds[k] > \ - inst.data.coords.dims[inst_name[imod_dims[k]]]: + while cinds[k] > inst.data.coords.dims[ + inst_name[imod_dims[k]]]: k += 1 if k < len(cinds): - cinds[k-1] = 0 + cinds[k - 1] = 0 cinds[k] += 1 else: break diff --git a/pysatModels/utils/match.py b/pysatModels/utils/match.py index 2e3ba140..51012ce4 100644 --- a/pysatModels/utils/match.py +++ b/pysatModels/utils/match.py @@ -12,7 +12,6 @@ from __future__ import unicode_literals import datetime as dt -import logging import numpy as np from os import path @@ -183,7 +182,8 @@ def collect_inst_model_pairs(start, stop, tinc, inst, inst_download_kwargs={}, raise ValueError(estr) if len(mod_name) != len(mod_units): - raise ValueError('Must provide units for each model location attribute') + raise ValueError(''.join(['Must provide units for each model location', + ' attribute'])) if inst_clean_rout is None: raise ValueError('Need routine to clean the instrument data') @@ -194,12 +194,13 @@ def collect_inst_model_pairs(start, stop, tinc, inst, inst_download_kwargs={}, del inst_download_kwargs['skip_download'] # Download the instrument data, if needed and wanted - if not skip_download and (stop - start).days != len(inst.files[start:stop]): + if not skip_download and (stop + - start).days != len(inst.files[start:stop]): missing_times = [tt for tt in date_range(start, stop, freq='1D', closed='left') if tt not in inst.files[start:stop].index] for tt in missing_times: - inst.download(start=tt, stop=tt+DateOffset(days=1), + inst.download(start=tt, stop=tt + DateOffset(days=1), 
**inst_download_kwargs) # Cycle through the times, loading the model and instrument data as needed @@ -233,15 +234,12 @@ def collect_inst_model_pairs(start, stop, tinc, inst, inst_download_kwargs={}, inst.load(date=istart) if not inst.empty and inst.index[0] >= istart: - added_names = extract.extract_modelled_observations(inst=inst, - model=mdata, inst_name=inst_name, - mod_name=mod_name, - mod_datetime_name=mod_datetime_name, - mod_time_name=mod_time_name, - mod_units=mod_units, sel_name=sel_name, - time_method=time_method, method=method, - pair_method=pair_method, - model_label=model_label) + added_names = extract.extract_modelled_observations( + inst=inst, model=mdata, inst_name=inst_name, + mod_name=mod_name, mod_datetime_name=mod_datetime_name, + mod_time_name=mod_time_name, mod_units=mod_units, + sel_name=sel_name, time_method=time_method, method=method, + pair_method=pair_method, model_label=model_label) if len(added_names) > 0: # Clean the instrument data diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..473d43ec --- /dev/null +++ b/setup.cfg @@ -0,0 +1,4 @@ +[tool:pytest] +flake8-ignore = + *.py W503 + docs/conf.py ALL diff --git a/setup.py b/setup.py index f1b704e9..67edda97 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", - "Operating System :: MacOS :: MacOS X",], + "Operating System :: MacOS :: MacOS X"], include_package_data=True, zip_safe=False, install_requires=install_requires,)
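
Note on the interpolation pattern touched throughout ``pysatModels/utils/extract.py`` above: the reformatted calls all wrap ``scipy.interpolate.RegularGridInterpolator`` with ``bounds_error=False`` and ``fill_value=None`` so that instrument locations slightly outside the model grid are extrapolated instead of raising. A minimal, self-contained sketch of that pattern is below; it is not pysatModels code, and the grid, value, and query names are illustrative only::

    import numpy as np
    from scipy import interpolate

    # Regular model grid: time (as int), latitude, longitude
    times = np.array([0, 1, 2])
    lats = np.linspace(-90.0, 90.0, 19)
    lons = np.linspace(0.0, 360.0, 37)

    # Gridded model values, shaped (time, lat, lon)
    values = np.random.default_rng(0).random((times.size, lats.size,
                                              lons.size))

    # Build the interpolator; fill_value=None allows extrapolation beyond
    # the grid edges, matching the bounds_error=False, fill_value=None
    # pattern used in extract.py
    interp = interpolate.RegularGridInterpolator((times, lats, lons), values,
                                                 bounds_error=False,
                                                 fill_value=None)

    # Scattered instrument locations, one row per observation
    obs_points = np.array([[0.5, 10.0, 45.0],
                           [1.5, -20.0, 180.0]])
    model_at_obs = interp(obs_points)
    print(model_at_obs)

In ``extract.py`` the same interpolator is built with the model datetime coordinate cast to ``int`` as the leading grid axis and each spatial coordinate divided by its unit-conversion scale, then evaluated at the matching stack of instrument times and scaled positions.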